linux/drivers/platform/x86/intel_scu_ipc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Intel SCU IPC mechanism
 *
 * (C) Copyright 2008-2010,2015 Intel Corporation
 * Author: Sreedhara DS (sreedhara.ds@intel.com)
 *
 * The SCU running on the ARC processor communicates with other entities
 * running on the IA core through an IPC mechanism which provides messaging
 * between the IA core and the SCU. The SCU has two IPC mechanisms, IPC-1 and
 * IPC-2. IPC-1 is used between the IA32 core and the SCU, whereas IPC-2 is
 * used between the P-Unit and the SCU. This driver deals with IPC-1 and
 * provides an API for power control unit registers (e.g. MSIC) along with
 * other APIs.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/intel_scu_ipc.h>

/* IPC defines the following message types */
#define IPCMSG_PCNTRL         0xff /* Power controller unit read/write */

/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W      0 /* Register write */
#define IPC_CMD_PCNTRL_R      1 /* Register read */
#define IPC_CMD_PCNTRL_M      2 /* Register read-modify-write */

/*
 * IPC register summary
 *
 * IPC register blocks are memory mapped at a fixed address of PCI BAR 0.
 * To read or write information to the SCU, the driver writes to the IPC-1
 * memory mapped registers. The IPC mechanism is as follows:
 *
 * 1. IA core cDMI interface claims this transaction and converts it to a
 *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 *
 * 2. South Complex cDMI block receives this message and writes it to
 *    the IPC-1 register block, causing an interrupt to the SCU.
 *
 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
 *    message handler is called within firmware.
 */

#define IPC_WWBUF_SIZE    20            /* IPC Write buffer Size */
#define IPC_RWBUF_SIZE    20            /* IPC Read buffer Size */
#define IPC_IOC           0x100         /* IPC command register IOC bit */

struct intel_scu_ipc_dev {
        struct device dev;
        struct resource mem;
        struct module *owner;
        int irq;
        void __iomem *ipc_base;
        struct completion cmd_complete;
};

#define IPC_STATUS              0x04
#define IPC_STATUS_IRQ          BIT(2)
#define IPC_STATUS_ERR          BIT(1)
#define IPC_STATUS_BUSY         BIT(0)

/*
 * IPC Write/Read Buffers:
 * 16 byte buffer for sending and receiving data to and from SCU.
 */
#define IPC_WRITE_BUFFER        0x80
#define IPC_READ_BUFFER         0x90

/* Timeout in jiffies */
#define IPC_TIMEOUT             (3 * HZ)

static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */

static struct class intel_scu_ipc_class = {
        .name = "intel_scu_ipc",
        .owner = THIS_MODULE,
};

/**
 * intel_scu_ipc_dev_get() - Get SCU IPC instance
 *
 * The recommended new API takes the SCU IPC instance as a parameter and
 * this function can be called by a driver to get that instance. It also
 * makes sure the driver providing the IPC functionality cannot be unloaded
 * while the caller has the instance.
 *
 * Call intel_scu_ipc_dev_put() to release the instance.
 *
 * Returns %NULL if SCU IPC is not currently available.
 */
struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
{
        struct intel_scu_ipc_dev *scu = NULL;

        mutex_lock(&ipclock);
        if (ipcdev) {
                get_device(&ipcdev->dev);
                /*
                 * Prevent the IPC provider from being unloaded while it
                 * is being used.
                 */
                if (!try_module_get(ipcdev->owner))
                        put_device(&ipcdev->dev);
                else
                        scu = ipcdev;
        }

        mutex_unlock(&ipclock);
        return scu;
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);

/**
 * intel_scu_ipc_dev_put() - Put SCU IPC instance
 * @scu: SCU IPC instance
 *
 * This function releases the SCU IPC instance retrieved from
 * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
 * unloaded.
 */
void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
{
        if (scu) {
                module_put(scu->owner);
                put_device(&scu->dev);
        }
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
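
/*
 * Example (illustrative sketch, not part of this driver): a consumer that
 * needs the SCU only occasionally can take a reference around each use.
 * The function name and the register offset below are hypothetical; only
 * intel_scu_ipc_dev_get(), intel_scu_ipc_dev_ioread8() and
 * intel_scu_ipc_dev_put() are real APIs:
 *
 *      static int example_read_scu_reg(u8 *val)
 *      {
 *              struct intel_scu_ipc_dev *scu;
 *              int ret;
 *
 *              scu = intel_scu_ipc_dev_get();
 *              if (!scu)
 *                      return -EPROBE_DEFER;   // provider not registered yet
 *
 *              ret = intel_scu_ipc_dev_ioread8(scu, 0x030, val);
 *
 *              intel_scu_ipc_dev_put(scu);
 *              return ret;
 *      }
 */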

struct intel_scu_ipc_devres {
        struct intel_scu_ipc_dev *scu;
};

static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
{
        struct intel_scu_ipc_devres *dr = res;
        struct intel_scu_ipc_dev *scu = dr->scu;

        intel_scu_ipc_dev_put(scu);
}

/**
 * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
 * @dev: Device requesting the SCU IPC device
 *
 * The recommended new API takes the SCU IPC instance as a parameter and
 * this function can be called by a driver to get that instance. It also
 * makes sure the driver providing the IPC functionality cannot be unloaded
 * while the caller has the instance.
 *
 * Returns %NULL if SCU IPC is not currently available.
 */
struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
{
        struct intel_scu_ipc_devres *dr;
        struct intel_scu_ipc_dev *scu;

        dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return NULL;

        scu = intel_scu_ipc_dev_get();
        if (!scu) {
                devres_free(dr);
                return NULL;
        }

        dr->scu = scu;
        devres_add(dev, dr);

        return scu;
}
EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
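
/*
 * Example (illustrative sketch, not part of this driver): the device managed
 * variant ties the reference to the consumer's lifetime, so no explicit
 * intel_scu_ipc_dev_put() is needed. The probe function and its platform
 * device are hypothetical:
 *
 *      static int example_probe(struct platform_device *pdev)
 *      {
 *              struct intel_scu_ipc_dev *scu;
 *
 *              scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
 *              if (!scu)
 *                      return -EPROBE_DEFER;
 *
 *              // scu stays valid until the example device is unbound
 *              return 0;
 *      }
 */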

/*
 * Send ipc command
 * Command Register (Write Only):
 * A write to this register results in an interrupt to the SCU core processor
 * Format:
 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
 */
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
        reinit_completion(&scu->cmd_complete);
        writel(cmd | IPC_IOC, scu->ipc_base);
}

/*
 * Write ipc data
 * IPC Write Buffer (Write Only):
 * 16-byte buffer for sending data associated with IPC command to
 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
 */
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
        writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}

/*
 * Status Register (Read Only):
 * Driver will read this register to get the ready/busy status of the IPC
 * block and error status of the IPC command that was just processed by SCU
 * Format:
 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
 */
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
        return __raw_readl(scu->ipc_base + IPC_STATUS);
}

/* Read ipc byte data */
static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
{
        return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
}

/* Read ipc u32 data */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
{
        return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
}

/* Wait until the SCU is no longer busy, or until the timeout expires */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
        unsigned long end = jiffies + IPC_TIMEOUT;

        do {
                u32 status;

                status = ipc_read_status(scu);
                if (!(status & IPC_STATUS_BUSY))
                        return (status & IPC_STATUS_ERR) ? -EIO : 0;

                usleep_range(50, 100);
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}

/* Wait until the IPC IOC interrupt is received or the timeout expires */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
        int status;

        if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
                return -ETIMEDOUT;

        status = ipc_read_status(scu);
        if (status & IPC_STATUS_ERR)
                return -EIO;

        return 0;
}

static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
{
        return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}

/* Read/Write power control (PMIC in Langwell, MSIC in Penwell) registers */
static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
                        u32 count, u32 op, u32 id)
{
        int nc;
        u32 offset = 0;
        int err;
        u8 cbuf[IPC_WWBUF_SIZE];
        u32 *wbuf = (u32 *)&cbuf;

        memset(cbuf, 0, sizeof(cbuf));

        mutex_lock(&ipclock);
        if (!scu)
                scu = ipcdev;
        if (!scu) {
                mutex_unlock(&ipclock);
                return -ENODEV;
        }

        for (nc = 0; nc < count; nc++, offset += 2) {
                cbuf[offset] = addr[nc];
                cbuf[offset + 1] = addr[nc] >> 8;
        }

        if (id == IPC_CMD_PCNTRL_R) {
                for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
                        ipc_data_writel(scu, wbuf[nc], offset);
                ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
        } else if (id == IPC_CMD_PCNTRL_W) {
                for (nc = 0; nc < count; nc++, offset += 1)
                        cbuf[offset] = data[nc];
                for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
                        ipc_data_writel(scu, wbuf[nc], offset);
                ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
        } else if (id == IPC_CMD_PCNTRL_M) {
                cbuf[offset] = data[0];
                cbuf[offset + 1] = data[1];
                ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
                ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
        }

        err = intel_scu_ipc_check_status(scu);
        if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
                /* Workaround: values are read as 0 without memcpy_fromio */
                memcpy_fromio(cbuf, scu->ipc_base + IPC_READ_BUFFER, 16);
                for (nc = 0; nc < count; nc++)
                        data[nc] = ipc_data_readb(scu, nc);
        }
        mutex_unlock(&ipclock);
        return err;
}

/**
 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
 * @scu: Optional SCU IPC instance
 * @addr: Register on SCU
 * @data: Return pointer for read byte
 *
 * Read a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
{
        return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);

/**
 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
 * @scu: Optional SCU IPC instance
 * @addr: Register on SCU
 * @data: Byte to write
 *
 * Write a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
{
        return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
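
/*
 * Example (illustrative sketch, not part of this driver): single register
 * access through the SCU. Here scu is an instance obtained as above, or
 * NULL to fall back to the global ipcdev as the legacy callers do. The
 * register offset and bit value are made-up example numbers:
 *
 *      u8 val;
 *      int ret;
 *
 *      ret = intel_scu_ipc_dev_ioread8(scu, 0x03f, &val);
 *      if (ret)
 *              return ret;
 *
 *      ret = intel_scu_ipc_dev_iowrite8(scu, 0x03f, val | 0x01);
 */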

/**
 * intel_scu_ipc_dev_readv() - Read a set of registers
 * @scu: Optional SCU IPC instance
 * @addr: Register list
 * @data: Bytes to return
 * @len: Length of array
 *
 * Read registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
                            size_t len)
{
        return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_readv);

/**
 * intel_scu_ipc_dev_writev() - Write a set of registers
 * @scu: Optional SCU IPC instance
 * @addr: Register list
 * @data: Bytes to write
 * @len: Length of array
 *
 * Write registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
                             size_t len)
{
        return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
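
/*
 * Example (illustrative sketch, not part of this driver): batched access.
 * Each entry of the address array is paired with the byte at the same index
 * in the data array, and at most five registers fit in one IPC transaction.
 * The register offsets are made-up example numbers:
 *
 *      u16 regs[3] = { 0x030, 0x031, 0x032 };
 *      u8 vals[3];
 *      int ret;
 *
 *      ret = intel_scu_ipc_dev_readv(scu, regs, vals, ARRAY_SIZE(regs));
 *      if (ret)
 *              return ret;
 */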

/**
 * intel_scu_ipc_dev_update() - Update a register
 * @scu: Optional SCU IPC instance
 * @addr: Register address
 * @data: Bits to update
 * @mask: Mask of bits to update
 *
 * Read-modify-write a power control unit register. @data holds the new
 * bit values and @mask selects which bits to update: a %0 bit in @mask
 * leaves the corresponding register bit unchanged, a %1 bit updates it
 * from @data. Returns %0 on success or an error code.
 *
 * This function may sleep. Locking between SCU accesses is handled
 * for the caller.
 */
int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
                             u8 mask)
{
        u8 tmp[2] = { data, mask };
        return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_update);
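
/*
 * Example (illustrative sketch, not part of this driver): set bit 0 of a
 * register while leaving all other bits untouched. The register offset is
 * a made-up example number:
 *
 *      int ret;
 *
 *      ret = intel_scu_ipc_dev_update(scu, 0x030, 0x01, 0x01);
 */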

/**
 * intel_scu_ipc_dev_simple_command() - Send a simple command
 * @scu: Optional SCU IPC instance
 * @cmd: Command
 * @sub: Sub type
 *
 * Issue a simple command to the SCU. Do not use this interface if you must
 * then access data as any data values may be overwritten by another SCU
 * access by the time this function returns.
 *
 * This function may sleep. Locking for SCU accesses is handled for the
 * caller.
 */
int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
                                     int sub)
{
        u32 cmdval;
        int err;

        mutex_lock(&ipclock);
        if (!scu)
                scu = ipcdev;
        if (!scu) {
                mutex_unlock(&ipclock);
                return -ENODEV;
        }
        cmdval = sub << 12 | cmd;
        ipc_command(scu, cmdval);
        err = intel_scu_ipc_check_status(scu);
        mutex_unlock(&ipclock);
        if (err)
                dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
        return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
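
/*
 * Example (illustrative sketch, not part of this driver): a command that
 * carries no data, where only the command and sub fields of the command
 * register are used. EXAMPLE_CMD and EXAMPLE_SUB are hypothetical values
 * that the SCU firmware of a given platform would define:
 *
 *      int ret;
 *
 *      ret = intel_scu_ipc_dev_simple_command(scu, EXAMPLE_CMD, EXAMPLE_SUB);
 *      if (ret)
 *              pr_warn("SCU command failed: %d\n", ret);
 */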

/**
 * intel_scu_ipc_dev_command_with_size() - Command with data
 * @scu: Optional SCU IPC instance
 * @cmd: Command
 * @sub: Sub type
 * @in: Input data
 * @inlen: Input length in bytes
 * @size: Input size written to the IPC command register in whatever
 *        units (dword, byte) the particular firmware requires. Normally
 *        should be the same as @inlen.
 * @out: Output data
 * @outlen: Output length in bytes
 *
 * Issue a command to the SCU which involves data transfers. Do the
 * data copies under the lock but leave it for the caller to interpret.
 */
int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
                                        int sub, const void *in, size_t inlen,
                                        size_t size, void *out, size_t outlen)
{
        size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
        size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
        u32 cmdval, inbuf[4] = {};
        int i, err;

        if (inbuflen > 4 || outbuflen > 4)
                return -EINVAL;

        mutex_lock(&ipclock);
        if (!scu)
                scu = ipcdev;
        if (!scu) {
                mutex_unlock(&ipclock);
                return -ENODEV;
        }

        memcpy(inbuf, in, inlen);
        for (i = 0; i < inbuflen; i++)
                ipc_data_writel(scu, inbuf[i], 4 * i);

        cmdval = (size << 16) | (sub << 12) | cmd;
        ipc_command(scu, cmdval);
        err = intel_scu_ipc_check_status(scu);

        if (!err) {
                u32 outbuf[4] = {};

                for (i = 0; i < outbuflen; i++)
                        outbuf[i] = ipc_data_readl(scu, 4 * i);

                memcpy(out, outbuf, outlen);
        }

        mutex_unlock(&ipclock);
        if (err)
                dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
        return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
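
/*
 * Example (illustrative sketch, not part of this driver): a command with a
 * dword of input and a dword of output. The input is copied into the write
 * buffer, the command word is formed as size << 16 | sub << 12 | cmd, and on
 * success the read buffer is copied back. EXAMPLE_CMD, EXAMPLE_SUB and the
 * payload layout are hypothetical:
 *
 *      u32 request = 0x12345678;
 *      u32 reply;
 *      int ret;
 *
 *      ret = intel_scu_ipc_dev_command_with_size(scu, EXAMPLE_CMD, EXAMPLE_SUB,
 *                                                &request, sizeof(request),
 *                                                sizeof(request),
 *                                                &reply, sizeof(reply));
 */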

/*
 * The interrupt handler is called when the IOC bit of IPC_COMMAND_REG is set
 * to 1. In that case the caller waits for this handler to complete the
 * command instead of polling the status register.
 *
 * The interrupt is edge triggered, so nothing needs to be cleared beyond
 * acknowledging the IRQ bit in the status register.
 */
static irqreturn_t ioc(int irq, void *dev_id)
{
        struct intel_scu_ipc_dev *scu = dev_id;
        int status = ipc_read_status(scu);

        writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
        complete(&scu->cmd_complete);

        return IRQ_HANDLED;
}

static void intel_scu_ipc_release(struct device *dev)
{
        struct intel_scu_ipc_dev *scu;

        scu = container_of(dev, struct intel_scu_ipc_dev, dev);
        if (scu->irq > 0)
                free_irq(scu->irq, scu);
        iounmap(scu->ipc_base);
        release_mem_region(scu->mem.start, resource_size(&scu->mem));
        kfree(scu);
}

/**
 * __intel_scu_ipc_register() - Register SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register SCU IPC mechanism under @parent.
 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
 * failure. The caller may use the returned instance if it needs to do
 * SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__intel_scu_ipc_register(struct device *parent,
                         const struct intel_scu_ipc_data *scu_data,
                         struct module *owner)
{
        int err;
        struct intel_scu_ipc_dev *scu;
        void __iomem *ipc_base;

        mutex_lock(&ipclock);
        /* We support only one IPC */
        if (ipcdev) {
                err = -EBUSY;
                goto err_unlock;
        }

        scu = kzalloc(sizeof(*scu), GFP_KERNEL);
        if (!scu) {
                err = -ENOMEM;
                goto err_unlock;
        }

        scu->owner = owner;
        scu->dev.parent = parent;
        scu->dev.class = &intel_scu_ipc_class;
        scu->dev.release = intel_scu_ipc_release;
        dev_set_name(&scu->dev, "intel_scu_ipc");

        if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
                                "intel_scu_ipc")) {
                err = -EBUSY;
                goto err_free;
        }

        ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
        if (!ipc_base) {
                err = -ENOMEM;
                goto err_release;
        }

        scu->ipc_base = ipc_base;
        scu->mem = scu_data->mem;
        scu->irq = scu_data->irq;
        init_completion(&scu->cmd_complete);

        if (scu->irq > 0) {
                err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
                if (err)
                        goto err_unmap;
        }

        /*
         * After this point intel_scu_ipc_release() takes care of
         * releasing the SCU IPC resources once refcount drops to zero.
         */
        err = device_register(&scu->dev);
        if (err) {
                put_device(&scu->dev);
                goto err_unlock;
        }

        /* Assign device at last */
        ipcdev = scu;
        mutex_unlock(&ipclock);

        return scu;

err_unmap:
        iounmap(ipc_base);
err_release:
        release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
err_free:
        kfree(scu);
err_unlock:
        mutex_unlock(&ipclock);

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
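
/*
 * Example (illustrative sketch, not part of this driver): a provider, such
 * as PCI or platform glue code, fills in struct intel_scu_ipc_data with the
 * memory resource and (optional) interrupt of the IPC-1 block and registers
 * it under its own device. The base address, IRQ and parent device below
 * are hypothetical:
 *
 *      struct intel_scu_ipc_data scu_data = {
 *              .mem = DEFINE_RES_MEM(0xff11c000, 0x100),
 *              .irq = example_irq,     // or 0 to fall back to polling
 *      };
 *      struct intel_scu_ipc_dev *scu;
 *
 *      scu = __intel_scu_ipc_register(parent, &scu_data, THIS_MODULE);
 *      if (IS_ERR(scu))
 *              return PTR_ERR(scu);
 */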

/**
 * intel_scu_ipc_unregister() - Unregister SCU IPC
 * @scu: SCU IPC handle
 *
 * This unregisters the SCU IPC device and releases the acquired
 * resources once the refcount goes to zero.
 */
void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
{
        mutex_lock(&ipclock);
        if (!WARN_ON(!ipcdev)) {
                ipcdev = NULL;
                device_unregister(&scu->dev);
        }
        mutex_unlock(&ipclock);
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);

static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
{
        struct intel_scu_ipc_devres *dr = res;
        struct intel_scu_ipc_dev *scu = dr->scu;

        intel_scu_ipc_unregister(scu);
}

/**
 * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register managed SCU IPC mechanism under
 * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
 * case of failure. The caller may use the returned instance if it needs
 * to do SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__devm_intel_scu_ipc_register(struct device *parent,
                              const struct intel_scu_ipc_data *scu_data,
                              struct module *owner)
{
        struct intel_scu_ipc_devres *dr;
        struct intel_scu_ipc_dev *scu;

        dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return NULL;

        scu = __intel_scu_ipc_register(parent, scu_data, owner);
        if (IS_ERR(scu)) {
                devres_free(dr);
                return scu;
        }

        dr->scu = scu;
        devres_add(parent, dr);

        return scu;
}
EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);

static int __init intel_scu_ipc_init(void)
{
        return class_register(&intel_scu_ipc_class);
}
subsys_initcall(intel_scu_ipc_init);

static void __exit intel_scu_ipc_exit(void)
{
        class_unregister(&intel_scu_ipc_class);
}
module_exit(intel_scu_ipc_exit);