linux/drivers/peci/controller/peci-aspeed.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2012-2017 ASPEED Technology Inc.
// Copyright (c) 2018-2021 Intel Corporation

#include <asm/unaligned.h>

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/peci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* ASPEED PECI Registers */
/* Control Register */
#define ASPEED_PECI_CTRL                        0x00
#define   ASPEED_PECI_CTRL_SAMPLING_MASK        GENMASK(19, 16)
#define   ASPEED_PECI_CTRL_RD_MODE_MASK         GENMASK(13, 12)
#define     ASPEED_PECI_CTRL_RD_MODE_DBG        BIT(13)
#define     ASPEED_PECI_CTRL_RD_MODE_COUNT      BIT(12)
#define   ASPEED_PECI_CTRL_CLK_SRC_HCLK         BIT(11)
#define   ASPEED_PECI_CTRL_CLK_DIV_MASK         GENMASK(10, 8)
#define   ASPEED_PECI_CTRL_INVERT_OUT           BIT(7)
#define   ASPEED_PECI_CTRL_INVERT_IN            BIT(6)
#define   ASPEED_PECI_CTRL_BUS_CONTENTION_EN    BIT(5)
#define   ASPEED_PECI_CTRL_PECI_EN              BIT(4)
#define   ASPEED_PECI_CTRL_PECI_CLK_EN          BIT(0)

/* Timing Negotiation Register */
#define ASPEED_PECI_TIMING_NEGOTIATION          0x04
#define   ASPEED_PECI_T_NEGO_MSG_MASK           GENMASK(15, 8)
#define   ASPEED_PECI_T_NEGO_ADDR_MASK          GENMASK(7, 0)

/* Command Register */
#define ASPEED_PECI_CMD                         0x08
#define   ASPEED_PECI_CMD_PIN_MONITORING        BIT(31)
#define   ASPEED_PECI_CMD_STS_MASK              GENMASK(27, 24)
#define     ASPEED_PECI_CMD_STS_ADDR_T_NEGO     0x3
#define   ASPEED_PECI_CMD_IDLE_MASK             \
          (ASPEED_PECI_CMD_STS_MASK | ASPEED_PECI_CMD_PIN_MONITORING)
#define   ASPEED_PECI_CMD_FIRE                  BIT(0)

/* Read/Write Length Register */
#define ASPEED_PECI_RW_LENGTH                   0x0c
#define   ASPEED_PECI_AW_FCS_EN                 BIT(31)
#define   ASPEED_PECI_RD_LEN_MASK               GENMASK(23, 16)
#define   ASPEED_PECI_WR_LEN_MASK               GENMASK(15, 8)
#define   ASPEED_PECI_TARGET_ADDR_MASK          GENMASK(7, 0)

/* Expected FCS Data Register */
#define ASPEED_PECI_EXPECTED_FCS                0x10
#define   ASPEED_PECI_EXPECTED_RD_FCS_MASK      GENMASK(23, 16)
#define   ASPEED_PECI_EXPECTED_AW_FCS_AUTO_MASK GENMASK(15, 8)
#define   ASPEED_PECI_EXPECTED_WR_FCS_MASK      GENMASK(7, 0)

/* Captured FCS Data Register */
#define ASPEED_PECI_CAPTURED_FCS                0x14
#define   ASPEED_PECI_CAPTURED_RD_FCS_MASK      GENMASK(23, 16)
#define   ASPEED_PECI_CAPTURED_WR_FCS_MASK      GENMASK(7, 0)

/* Interrupt Register */
#define ASPEED_PECI_INT_CTRL                    0x18
#define   ASPEED_PECI_TIMING_NEGO_SEL_MASK      GENMASK(31, 30)
#define     ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO    0
#define     ASPEED_PECI_2ND_BIT_OF_ADDR_NEGO    1
#define     ASPEED_PECI_MESSAGE_NEGO            2
#define   ASPEED_PECI_INT_MASK                  GENMASK(4, 0)
#define     ASPEED_PECI_INT_BUS_TIMEOUT         BIT(4)
#define     ASPEED_PECI_INT_BUS_CONTENTION      BIT(3)
#define     ASPEED_PECI_INT_WR_FCS_BAD          BIT(2)
#define     ASPEED_PECI_INT_WR_FCS_ABORT        BIT(1)
#define     ASPEED_PECI_INT_CMD_DONE            BIT(0)

/* Interrupt Status Register */
#define ASPEED_PECI_INT_STS                     0x1c
#define   ASPEED_PECI_INT_TIMING_RESULT_MASK    GENMASK(29, 16)
          /* bits[4..0]: same bit fields as in the 'Interrupt Register' */

/* Rx/Tx Data Buffer Registers */
#define ASPEED_PECI_WR_DATA0                    0x20
#define ASPEED_PECI_WR_DATA1                    0x24
#define ASPEED_PECI_WR_DATA2                    0x28
#define ASPEED_PECI_WR_DATA3                    0x2c
#define ASPEED_PECI_RD_DATA0                    0x30
#define ASPEED_PECI_RD_DATA1                    0x34
#define ASPEED_PECI_RD_DATA2                    0x38
#define ASPEED_PECI_RD_DATA3                    0x3c
#define ASPEED_PECI_WR_DATA4                    0x40
#define ASPEED_PECI_WR_DATA5                    0x44
#define ASPEED_PECI_WR_DATA6                    0x48
#define ASPEED_PECI_WR_DATA7                    0x4c
#define ASPEED_PECI_RD_DATA4                    0x50
#define ASPEED_PECI_RD_DATA5                    0x54
#define ASPEED_PECI_RD_DATA6                    0x58
#define ASPEED_PECI_RD_DATA7                    0x5c
#define   ASPEED_PECI_DATA_BUF_SIZE_MAX         32

/* Timing Negotiation */
#define ASPEED_PECI_CLK_FREQUENCY_MIN           2000
#define ASPEED_PECI_CLK_FREQUENCY_DEFAULT       1000000
#define ASPEED_PECI_CLK_FREQUENCY_MAX           2000000
#define ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT   8
/* Timeout */
#define ASPEED_PECI_IDLE_CHECK_TIMEOUT_US       (50 * USEC_PER_MSEC)
#define ASPEED_PECI_IDLE_CHECK_INTERVAL_US      (10 * USEC_PER_MSEC)
#define ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT      1000
#define ASPEED_PECI_CMD_TIMEOUT_MS_MAX          1000

#define ASPEED_PECI_CLK_DIV1(msg_timing) (4 * (msg_timing) + 1)
#define ASPEED_PECI_CLK_DIV2(clk_div_exp) BIT(clk_div_exp)
#define ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp) \
        (4 * ASPEED_PECI_CLK_DIV1(msg_timing) * ASPEED_PECI_CLK_DIV2(clk_div_exp))

struct aspeed_peci {
        struct peci_controller *controller;
        struct device *dev;
        void __iomem *base;
        struct reset_control *rst;
        int irq;
        spinlock_t lock; /* to sync completion status handling */
        struct completion xfer_complete;
        struct clk *clk;
        u32 clk_frequency;
        u32 status;
        u32 cmd_timeout_ms;
};

struct clk_aspeed_peci {
        struct clk_hw hw;
        struct aspeed_peci *aspeed_peci;
};

static void aspeed_peci_controller_enable(struct aspeed_peci *priv)
{
        u32 val = readl(priv->base + ASPEED_PECI_CTRL);

        val |= ASPEED_PECI_CTRL_PECI_CLK_EN;
        val |= ASPEED_PECI_CTRL_PECI_EN;

        writel(val, priv->base + ASPEED_PECI_CTRL);
}

static void aspeed_peci_init_regs(struct aspeed_peci *priv)
{
        u32 val;

        /* Clear interrupts */
        writel(ASPEED_PECI_INT_MASK, priv->base + ASPEED_PECI_INT_STS);

        /* Set timing negotiation mode and enable interrupts */
        val = FIELD_PREP(ASPEED_PECI_TIMING_NEGO_SEL_MASK, ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO);
        val |= ASPEED_PECI_INT_MASK;
        writel(val, priv->base + ASPEED_PECI_INT_CTRL);

        val = FIELD_PREP(ASPEED_PECI_CTRL_SAMPLING_MASK, ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT);
        writel(val, priv->base + ASPEED_PECI_CTRL);
}

static int aspeed_peci_check_idle(struct aspeed_peci *priv)
{
        u32 cmd_sts = readl(priv->base + ASPEED_PECI_CMD);
        int ret;

        /*
         * Under normal circumstances we expect to be idle here.
         * If an earlier error or timeout has left the hardware in a non-idle
         * state, reset and reinitialize the controller to avoid a potential
         * hang.
         */
        if (FIELD_GET(ASPEED_PECI_CMD_STS_MASK, cmd_sts)) {
                ret = reset_control_assert(priv->rst);
                if (ret) {
                        dev_err(priv->dev, "cannot assert reset control\n");
                        return ret;
                }

                ret = reset_control_deassert(priv->rst);
                if (ret) {
                        dev_err(priv->dev, "cannot deassert reset control\n");
                        return ret;
                }

                aspeed_peci_init_regs(priv);

                ret = clk_set_rate(priv->clk, priv->clk_frequency);
                if (ret < 0) {
                        dev_err(priv->dev, "cannot set clock frequency\n");
                        return ret;
                }

                aspeed_peci_controller_enable(priv);
        }

        return readl_poll_timeout(priv->base + ASPEED_PECI_CMD,
                                  cmd_sts,
                                  !(cmd_sts & ASPEED_PECI_CMD_IDLE_MASK),
                                  ASPEED_PECI_IDLE_CHECK_INTERVAL_US,
                                  ASPEED_PECI_IDLE_CHECK_TIMEOUT_US);
}

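/*
 * Transfer flow (as implemented below): wait for the controller to go idle,
 * program the target address and Tx/Rx lengths, copy the request payload into
 * the write data buffers, fire the command, wait for the IRQ handler to signal
 * ASPEED_PECI_INT_CMD_DONE, then copy the read data buffers back into the
 * request.
 */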
static int aspeed_peci_xfer(struct peci_controller *controller,
                            u8 addr, struct peci_request *req)
{
        struct aspeed_peci *priv = dev_get_drvdata(controller->dev.parent);
        unsigned long timeout = msecs_to_jiffies(priv->cmd_timeout_ms);
        u32 peci_head;
        int ret, i;

        if (req->tx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX ||
            req->rx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX)
                return -EINVAL;

        /* Check command sts and bus idle state */
        ret = aspeed_peci_check_idle(priv);
        if (ret)
                return ret; /* -ETIMEDOUT */

        spin_lock_irq(&priv->lock);
        reinit_completion(&priv->xfer_complete);

        peci_head = FIELD_PREP(ASPEED_PECI_TARGET_ADDR_MASK, addr) |
                    FIELD_PREP(ASPEED_PECI_WR_LEN_MASK, req->tx.len) |
                    FIELD_PREP(ASPEED_PECI_RD_LEN_MASK, req->rx.len);

        writel(peci_head, priv->base + ASPEED_PECI_RW_LENGTH);

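        /*
         * The Tx buffer is split into two register banks: bytes 0-15 go to
         * ASPEED_PECI_WR_DATA0..3 (0x20-0x2c) and bytes 16-31 go to
         * ASPEED_PECI_WR_DATA4..7 (0x40-0x4c). The Rx read loop below follows
         * the same split.
         */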
        for (i = 0; i < req->tx.len; i += 4) {
                u32 reg = (i < 16 ? ASPEED_PECI_WR_DATA0 : ASPEED_PECI_WR_DATA4) + i % 16;

                writel(get_unaligned_le32(&req->tx.buf[i]), priv->base + reg);
        }

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
        dev_dbg(priv->dev, "HEAD : %#08x\n", peci_head);
        print_hex_dump_bytes("TX : ", DUMP_PREFIX_NONE, req->tx.buf, req->tx.len);
#endif

        priv->status = 0;
        writel(ASPEED_PECI_CMD_FIRE, priv->base + ASPEED_PECI_CMD);
        spin_unlock_irq(&priv->lock);

        ret = wait_for_completion_interruptible_timeout(&priv->xfer_complete, timeout);
        if (ret < 0)
                return ret;

        if (ret == 0) {
                dev_dbg(priv->dev, "timeout waiting for a response\n");
                return -ETIMEDOUT;
        }

        spin_lock_irq(&priv->lock);

        if (priv->status != ASPEED_PECI_INT_CMD_DONE) {
                spin_unlock_irq(&priv->lock);
                dev_dbg(priv->dev, "no valid response, status: %#02x\n", priv->status);
                return -EIO;
        }

        spin_unlock_irq(&priv->lock);

        /*
         * Register access must use dword reads, so make sure the buffer size
         * is a multiple of 4 bytes.
         */
        BUILD_BUG_ON(PECI_REQUEST_MAX_BUF_SIZE % 4);

        for (i = 0; i < req->rx.len; i += 4) {
                u32 reg = (i < 16 ? ASPEED_PECI_RD_DATA0 : ASPEED_PECI_RD_DATA4) + i % 16;
                u32 rx_data = readl(priv->base + reg);

                put_unaligned_le32(rx_data, &req->rx.buf[i]);
        }

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
        print_hex_dump_bytes("RX : ", DUMP_PREFIX_NONE, req->rx.buf, req->rx.len);
#endif
        return 0;
}

static irqreturn_t aspeed_peci_irq_handler(int irq, void *arg)
{
        struct aspeed_peci *priv = arg;
        u32 status;

        spin_lock(&priv->lock);
        status = readl(priv->base + ASPEED_PECI_INT_STS);
        writel(status, priv->base + ASPEED_PECI_INT_STS);
        priv->status |= (status & ASPEED_PECI_INT_MASK);

        /*
         * Every command should end with the ASPEED_PECI_INT_CMD_DONE bit set,
         * even in an error case.
         */
        if (status & ASPEED_PECI_INT_CMD_DONE)
                complete(&priv->xfer_complete);

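        /* Clear the command register (this drops ASPEED_PECI_CMD_FIRE). */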
        writel(0, priv->base + ASPEED_PECI_CMD);

        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
}

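/*
 * Brute-force search: for every msg_timing in [1, 255] and clk_div_exp in
 * [0, 7], pick the pair whose DIV1 * DIV2 product is closest to the requested
 * (already pre-divided) rate.
 */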
static void clk_aspeed_peci_find_div_values(unsigned long rate, int *msg_timing, int *clk_div_exp)
{
        unsigned long best_diff = ~0ul, diff;
        int msg_timing_temp, clk_div_exp_temp, i, j;

        for (i = 1; i <= 255; i++)
                for (j = 0; j < 8; j++) {
                        diff = abs(rate - ASPEED_PECI_CLK_DIV1(i) * ASPEED_PECI_CLK_DIV2(j));
                        if (diff < best_diff) {
                                msg_timing_temp = i;
                                clk_div_exp_temp = j;
                                best_diff = diff;
                        }
                }

        *msg_timing = msg_timing_temp;
        *clk_div_exp = clk_div_exp_temp;
}

static int clk_aspeed_peci_get_div(unsigned long rate, const unsigned long *prate)
{
        unsigned long this_rate = *prate / (4 * rate);
        int msg_timing, clk_div_exp;

        clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp);

        return ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp);
}

static int clk_aspeed_peci_set_rate(struct clk_hw *hw, unsigned long rate,
                                    unsigned long prate)
{
        struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw);
        struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci;
        unsigned long this_rate = prate / (4 * rate);
        int clk_div_exp, msg_timing;
        u32 val;

        clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp);

        val = readl(aspeed_peci->base + ASPEED_PECI_CTRL);
        val |= FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK, clk_div_exp);
        writel(val, aspeed_peci->base + ASPEED_PECI_CTRL);

        val = FIELD_PREP(ASPEED_PECI_T_NEGO_MSG_MASK, msg_timing);
        val |= FIELD_PREP(ASPEED_PECI_T_NEGO_ADDR_MASK, msg_timing);
        writel(val, aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION);

        return 0;
}

static long clk_aspeed_peci_round_rate(struct clk_hw *hw, unsigned long rate,
                                       unsigned long *prate)
{
        int div = clk_aspeed_peci_get_div(rate, prate);

        return DIV_ROUND_UP_ULL(*prate, div);
}

static unsigned long clk_aspeed_peci_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
        struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw);
        struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci;
        int div, msg_timing, addr_timing, clk_div_exp;
        u32 reg;

        reg = readl(aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION);
        msg_timing = FIELD_GET(ASPEED_PECI_T_NEGO_MSG_MASK, reg);
        addr_timing = FIELD_GET(ASPEED_PECI_T_NEGO_ADDR_MASK, reg);

        if (msg_timing != addr_timing)
                return 0;

        reg = readl(aspeed_peci->base + ASPEED_PECI_CTRL);
        clk_div_exp = FIELD_GET(ASPEED_PECI_CTRL_CLK_DIV_MASK, reg);

        div = ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp);

        return DIV_ROUND_UP_ULL(prate, div);
}

static const struct clk_ops clk_aspeed_peci_ops = {
        .set_rate = clk_aspeed_peci_set_rate,
        .round_rate = clk_aspeed_peci_round_rate,
        .recalc_rate = clk_aspeed_peci_recalc_rate,
};

/*
 * PECI HW contains a clock divider which is a combination of:
 *  div0: 4 (fixed divider)
 *  div1: x + 1
 *  div2: 1 << y
 * In other words, out_clk = in_clk / (div0 * div1 * div2)
 * The resulting frequency is used by the PECI controller to drive the PECI
 * bus and negotiate the optimal transfer rate.
 */
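/*
 * Illustrative example (numbers chosen for clarity, not taken from a
 * datasheet): with msg_timing = 8 and clk_div_exp = 2 the macros above give
 * ASPEED_PECI_CLK_DIV(8, 2) = 4 * ASPEED_PECI_CLK_DIV1(8) *
 * ASPEED_PECI_CLK_DIV2(2) = 4 * 33 * 4 = 528, so a hypothetical 24 MHz input
 * clock would yield roughly 24000000 / 528 ~= 45.5 kHz on the bus.
 */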
static struct clk *devm_aspeed_peci_register_clk_div(struct device *dev, struct clk *parent,
                                                     struct aspeed_peci *priv)
{
        struct clk_aspeed_peci *peci_clk;
        struct clk_init_data init;
        const char *parent_name;
        char name[32];
        int ret;

        snprintf(name, sizeof(name), "%s_div", dev_name(dev));

        parent_name = __clk_get_name(parent);

        init.ops = &clk_aspeed_peci_ops;
        init.name = name;
        init.parent_names = (const char* []) { parent_name };
        init.num_parents = 1;
        init.flags = 0;

        peci_clk = devm_kzalloc(dev, sizeof(struct clk_aspeed_peci), GFP_KERNEL);
        if (!peci_clk)
                return ERR_PTR(-ENOMEM);

        peci_clk->hw.init = &init;
        peci_clk->aspeed_peci = priv;

        ret = devm_clk_hw_register(dev, &peci_clk->hw);
        if (ret)
                return ERR_PTR(ret);

        return peci_clk->hw.clk;
}

static void aspeed_peci_property_sanitize(struct device *dev, const char *propname,
                                          u32 min, u32 max, u32 default_val, u32 *propval)
{
        u32 val;
        int ret;

        ret = device_property_read_u32(dev, propname, &val);
        if (ret) {
                val = default_val;
        } else if (val > max || val < min) {
                dev_warn(dev, "invalid %s: %u, falling back to: %u\n",
                         propname, val, default_val);

                val = default_val;
        }

        *propval = val;
}

static void aspeed_peci_property_setup(struct aspeed_peci *priv)
{
        aspeed_peci_property_sanitize(priv->dev, "clock-frequency",
                                      ASPEED_PECI_CLK_FREQUENCY_MIN, ASPEED_PECI_CLK_FREQUENCY_MAX,
                                      ASPEED_PECI_CLK_FREQUENCY_DEFAULT, &priv->clk_frequency);
        aspeed_peci_property_sanitize(priv->dev, "cmd-timeout-ms",
                                      1, ASPEED_PECI_CMD_TIMEOUT_MS_MAX,
                                      ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT, &priv->cmd_timeout_ms);
}
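
/*
 * Illustrative devicetree fragment for the two optional properties read above
 * (the values shown are just the driver defaults, and the &peci0 label is made
 * up for this example):
 *
 *     &peci0 {
 *             clock-frequency = <1000000>;
 *             cmd-timeout-ms = <1000>;
 *     };
 */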

static const struct peci_controller_ops aspeed_ops = {
        .xfer = aspeed_peci_xfer,
};

static void aspeed_peci_reset_control_release(void *data)
{
        reset_control_assert(data);
}

static int devm_aspeed_peci_reset_control_deassert(struct device *dev, struct reset_control *rst)
{
        int ret;

        ret = reset_control_deassert(rst);
        if (ret)
                return ret;

        return devm_add_action_or_reset(dev, aspeed_peci_reset_control_release, rst);
}

static void aspeed_peci_clk_release(void *data)
{
        clk_disable_unprepare(data);
}

static int devm_aspeed_peci_clk_enable(struct device *dev, struct clk *clk)
{
        int ret;

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        return devm_add_action_or_reset(dev, aspeed_peci_clk_release, clk);
}

static int aspeed_peci_probe(struct platform_device *pdev)
{
        struct peci_controller *controller;
        struct aspeed_peci *priv;
        struct clk *ref_clk;
        int ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = &pdev->dev;
        dev_set_drvdata(priv->dev, priv);

        priv->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0)
                return priv->irq;

        ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
                               0, "peci-aspeed", priv);
        if (ret)
                return ret;

        init_completion(&priv->xfer_complete);
        spin_lock_init(&priv->lock);

        priv->rst = devm_reset_control_get(&pdev->dev, NULL);
        if (IS_ERR(priv->rst))
                return dev_err_probe(priv->dev, PTR_ERR(priv->rst),
                                     "failed to get reset control\n");

        ret = devm_aspeed_peci_reset_control_deassert(priv->dev, priv->rst);
        if (ret)
                return dev_err_probe(priv->dev, ret, "cannot deassert reset control\n");

        aspeed_peci_property_setup(priv);

        aspeed_peci_init_regs(priv);

        ref_clk = devm_clk_get(priv->dev, NULL);
        if (IS_ERR(ref_clk))
                return dev_err_probe(priv->dev, PTR_ERR(ref_clk), "failed to get ref clock\n");

        priv->clk = devm_aspeed_peci_register_clk_div(priv->dev, ref_clk, priv);
        if (IS_ERR(priv->clk))
                return dev_err_probe(priv->dev, PTR_ERR(priv->clk), "cannot register clock\n");

        ret = clk_set_rate(priv->clk, priv->clk_frequency);
        if (ret < 0)
                return dev_err_probe(priv->dev, ret, "cannot set clock frequency\n");

        ret = devm_aspeed_peci_clk_enable(priv->dev, priv->clk);
        if (ret)
                return dev_err_probe(priv->dev, ret, "failed to enable clock\n");

        aspeed_peci_controller_enable(priv);

        controller = devm_peci_controller_add(priv->dev, &aspeed_ops);
        if (IS_ERR(controller))
                return dev_err_probe(priv->dev, PTR_ERR(controller),
                                     "failed to add aspeed peci controller\n");

        priv->controller = controller;

        return 0;
}

static const struct of_device_id aspeed_peci_of_table[] = {
        { .compatible = "aspeed,ast2400-peci", },
        { .compatible = "aspeed,ast2500-peci", },
        { .compatible = "aspeed,ast2600-peci", },
        { }
};
MODULE_DEVICE_TABLE(of, aspeed_peci_of_table);

static struct platform_driver aspeed_peci_driver = {
        .probe  = aspeed_peci_probe,
        .driver = {
                .name           = "peci-aspeed",
                .of_match_table = aspeed_peci_of_table,
        },
};
module_platform_driver(aspeed_peci_driver);

MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
MODULE_DESCRIPTION("ASPEED PECI driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI);