linux/drivers/spi/spi-mem.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH            8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *                                        memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *       function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
                                       const struct spi_mem_op *op,
                                       struct sg_table *sgt)
{
        struct device *dmadev;

        if (!op->data.nbytes)
                return -EINVAL;

        if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
                dmadev = ctlr->dma_tx->device->dev;
        else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
                dmadev = ctlr->dma_rx->device->dev;
        else
                dmadev = ctlr->dev.parent;

        if (!dmadev)
                return -EINVAL;

        return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
                           op->data.dir == SPI_MEM_DATA_IN ?
                           DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *                                          memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *       spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
                                          const struct spi_mem_op *op,
                                          struct sg_table *sgt)
{
        struct device *dmadev;

        if (!op->data.nbytes)
                return;

        if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
                dmadev = ctlr->dma_tx->device->dev;
        else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
                dmadev = ctlr->dma_rx->device->dev;
        else
                dmadev = ctlr->dev.parent;

        spi_unmap_buf(ctlr, dmadev, sgt,
                      op->data.dir == SPI_MEM_DATA_IN ?
                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
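
/*
 * Example (illustrative sketch, not part of the mainline file): a controller
 * driver's exec_op() data phase could pair the two helpers above around its
 * own DMA submission. my_hw_issue_dma() and 'priv' are hypothetical.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_hw_issue_dma(priv, &sgt);	// hypothetical HW helper
 *
 *	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *	return ret;
 */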

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
        u32 mode = mem->spi->mode;

        switch (buswidth) {
        case 1:
                return 0;

        case 2:
                if ((tx &&
                     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
                    (!tx &&
                     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
                        return 0;

                break;

        case 4:
                if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
                    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
                        return 0;

                break;

        case 8:
                if ((tx && (mode & SPI_TX_OCTAL)) ||
                    (!tx && (mode & SPI_RX_OCTAL)))
                        return 0;

                break;

        default:
                break;
        }

        return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_mem *mem,
                                   const struct spi_mem_op *op)
{
        if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
                return false;

        if (op->addr.nbytes &&
            spi_check_buswidth_req(mem, op->addr.buswidth, true))
                return false;

        if (op->dummy.nbytes &&
            spi_check_buswidth_req(mem, op->dummy.buswidth, true))
                return false;

        if (op->data.dir != SPI_MEM_NO_DATA &&
            spi_check_buswidth_req(mem, op->data.buswidth,
                                   op->data.dir == SPI_MEM_DATA_OUT))
                return false;

        return true;
}

bool spi_mem_dtr_supports_op(struct spi_mem *mem,
                             const struct spi_mem_op *op)
{
        if (op->cmd.nbytes != 2)
                return false;

        return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op);

bool spi_mem_default_supports_op(struct spi_mem *mem,
                                 const struct spi_mem_op *op)
{
        if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
                return false;

        if (op->cmd.nbytes != 1)
                return false;

        return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
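
/*
 * Example (illustrative sketch): a controller driver with a hardware limit,
 * say a 4-byte address FIFO, would typically layer its own supports_op()
 * check on top of the default one. The names below are hypothetical.
 *
 *	static bool my_ctlr_supports_op(struct spi_mem *mem,
 *					const struct spi_mem_op *op)
 *	{
 *		if (op->addr.nbytes > 4)
 *			return false;
 *
 *		return spi_mem_default_supports_op(mem, op);
 *	}
 */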

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
        if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
                return false;

        return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
        if (!op->cmd.buswidth || !op->cmd.nbytes)
                return -EINVAL;

        if ((op->addr.nbytes && !op->addr.buswidth) ||
            (op->dummy.nbytes && !op->dummy.buswidth) ||
            (op->data.nbytes && !op->data.buswidth))
                return -EINVAL;

        if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
            !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
            !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
            !spi_mem_buswidth_is_valid(op->data.buswidth))
                return -EINVAL;

        return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
                                         const struct spi_mem_op *op)
{
        struct spi_controller *ctlr = mem->spi->controller;

        if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
                return ctlr->mem_ops->supports_op(mem, op);

        return spi_mem_default_supports_op(mem, op);
}
/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *                         connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only support
 * specific opcodes. It can even be that the controller and device both
 * support Quad IOs but the hardware prevents you from using them because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        if (spi_mem_check_op(op))
                return false;

        return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
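
/*
 * Example (illustrative sketch): a SPI memory driver probing its read path
 * might test a quad-output read and fall back to a plain single-line read.
 * The 0x6b opcode and the surrounding variables are assumptions.
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *					  SPI_MEM_OP_ADDR(3, 0, 1),
 *					  SPI_MEM_OP_DUMMY(1, 1),
 *					  SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		op = single_line_read_op;	// hypothetical fallback
 */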

static int spi_mem_access_start(struct spi_mem *mem)
{
        struct spi_controller *ctlr = mem->spi->controller;

        /*
         * Flush the message queue before executing our SPI memory
         * operation to prevent preemption of regular SPI transfers.
         */
        spi_flush_queue(ctlr);

        if (ctlr->auto_runtime_pm) {
                int ret;

                ret = pm_runtime_get_sync(ctlr->dev.parent);
                if (ret < 0) {
                        pm_runtime_put_noidle(ctlr->dev.parent);
                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
                                ret);
                        return ret;
                }
        }

        mutex_lock(&ctlr->bus_lock_mutex);
        mutex_lock(&ctlr->io_mutex);

        return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
        struct spi_controller *ctlr = mem->spi->controller;

        mutex_unlock(&ctlr->io_mutex);
        mutex_unlock(&ctlr->bus_lock_mutex);

        if (ctlr->auto_runtime_pm)
                pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
        struct spi_controller *ctlr = mem->spi->controller;
        struct spi_transfer xfers[4] = { };
        struct spi_message msg;
        u8 *tmpbuf;
        int ret;

        ret = spi_mem_check_op(op);
        if (ret)
                return ret;

        if (!spi_mem_internal_supports_op(mem, op))
                return -ENOTSUPP;

        if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
                ret = spi_mem_access_start(mem);
                if (ret)
                        return ret;

                ret = ctlr->mem_ops->exec_op(mem, op);

                spi_mem_access_end(mem);

                /*
                 * Some controllers only optimize specific paths (typically the
                 * read path) and expect the core to use the regular SPI
                 * interface in other cases.
                 */
                if (!ret || ret != -ENOTSUPP)
                        return ret;
        }

        tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

        /*
         * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
         * we're guaranteed that this buffer is DMA-able, as required by the
         * SPI layer.
         */
        tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
        if (!tmpbuf)
                return -ENOMEM;

        spi_message_init(&msg);

        tmpbuf[0] = op->cmd.opcode;
        xfers[xferpos].tx_buf = tmpbuf;
        xfers[xferpos].len = op->cmd.nbytes;
        xfers[xferpos].tx_nbits = op->cmd.buswidth;
        spi_message_add_tail(&xfers[xferpos], &msg);
        xferpos++;
        totalxferlen++;

        if (op->addr.nbytes) {
                int i;

                for (i = 0; i < op->addr.nbytes; i++)
                        tmpbuf[i + 1] = op->addr.val >>
                                        (8 * (op->addr.nbytes - i - 1));

                xfers[xferpos].tx_buf = tmpbuf + 1;
                xfers[xferpos].len = op->addr.nbytes;
                xfers[xferpos].tx_nbits = op->addr.buswidth;
                spi_message_add_tail(&xfers[xferpos], &msg);
                xferpos++;
                totalxferlen += op->addr.nbytes;
        }

        if (op->dummy.nbytes) {
                memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
                xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
                xfers[xferpos].len = op->dummy.nbytes;
                xfers[xferpos].tx_nbits = op->dummy.buswidth;
                xfers[xferpos].dummy_data = 1;
                spi_message_add_tail(&xfers[xferpos], &msg);
                xferpos++;
                totalxferlen += op->dummy.nbytes;
        }

        if (op->data.nbytes) {
                if (op->data.dir == SPI_MEM_DATA_IN) {
                        xfers[xferpos].rx_buf = op->data.buf.in;
                        xfers[xferpos].rx_nbits = op->data.buswidth;
                } else {
                        xfers[xferpos].tx_buf = op->data.buf.out;
                        xfers[xferpos].tx_nbits = op->data.buswidth;
                }

                xfers[xferpos].len = op->data.nbytes;
                spi_message_add_tail(&xfers[xferpos], &msg);
                xferpos++;
                totalxferlen += op->data.nbytes;
        }

        ret = spi_sync(mem->spi, &msg);

        kfree(tmpbuf);

        if (ret)
                return ret;

        if (msg.actual_length != totalxferlen)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
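
/*
 * Example (illustrative sketch): reading a 3-byte JEDEC ID the way a flash
 * driver typically does it. The 0x9f opcode is the usual RDID command; 'id'
 * must point to a DMA-able buffer and, like 'mem', is an assumption here.
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(3, id, 1));
 *
 *	ret = spi_mem_exec_op(mem, &op);
 */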

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *                      upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *         by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
        return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *                            match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *         0 otherwise. Note that @op->data.nbytes will be updated if @op
 *         can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
        struct spi_controller *ctlr = mem->spi->controller;
        size_t len;

        if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
                return ctlr->mem_ops->adjust_op_size(mem, op);

        if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
                len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

                if (len > spi_max_transfer_size(mem->spi))
                        return -EINVAL;

                op->data.nbytes = min3((size_t)op->data.nbytes,
                                       spi_max_transfer_size(mem->spi),
                                       spi_max_message_size(mem->spi) -
                                       len);
                if (!op->data.nbytes)
                        return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
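
/*
 * Example (illustrative sketch): callers are expected to shrink-and-retry,
 * i.e. let spi_mem_adjust_op_size() clamp op.data.nbytes, then loop until
 * the whole request is done. 'addr', 'buf' and 'len' are assumptions.
 *
 *	while (len) {
 *		op.addr.val = addr;
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		addr += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */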

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
                                      u64 offs, size_t len, void *buf)
{
        struct spi_mem_op op = desc->info.op_tmpl;
        int ret;

        op.addr.val = desc->info.offset + offs;
        op.data.buf.in = buf;
        op.data.nbytes = len;
        ret = spi_mem_adjust_op_size(desc->mem, &op);
        if (ret)
                return ret;

        ret = spi_mem_exec_op(desc->mem, &op);
        if (ret)
                return ret;

        return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
                                       u64 offs, size_t len, const void *buf)
{
        struct spi_mem_op op = desc->info.op_tmpl;
        int ret;

        op.addr.val = desc->info.offset + offs;
        op.data.buf.out = buf;
        op.data.nbytes = len;
        ret = spi_mem_adjust_op_size(desc->mem, &op);
        if (ret)
                return ret;

        ret = spi_mem_exec_op(desc->mem, &op);
        if (ret)
                return ret;

        return op.data.nbytes;
}
/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
                      const struct spi_mem_dirmap_info *info)
{
        struct spi_controller *ctlr = mem->spi->controller;
        struct spi_mem_dirmap_desc *desc;
        int ret = -ENOTSUPP;

        /* Make sure the number of address bytes is between 1 and 8. */
        if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
                return ERR_PTR(-EINVAL);

        /* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
        if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
                return ERR_PTR(-EINVAL);

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return ERR_PTR(-ENOMEM);

        desc->mem = mem;
        desc->info = *info;
        if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
                ret = ctlr->mem_ops->dirmap_create(desc);

        if (ret) {
                desc->nodirmap = true;
                if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
                        ret = -ENOTSUPP;
                else
                        ret = 0;
        }

        if (ret) {
                kfree(desc);
                return ERR_PTR(ret);
        }

        return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
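
/*
 * Example (illustrative sketch): creating a read dirmap over a whole flash
 * device. The 0x03 opcode is the classic single-line read; the 16 MiB length
 * and the surrounding variables are assumptions for the sketch.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_NO_DUMMY,
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */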

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
        struct spi_controller *ctlr = desc->mem->spi->controller;

        if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
                ctlr->mem_ops->dirmap_destroy(desc);

        kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
        struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

        spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *                                it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
                           const struct spi_mem_dirmap_info *info)
{
        struct spi_mem_dirmap_desc **ptr, *desc;

        ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        desc = spi_mem_dirmap_create(mem, info);
        if (IS_ERR(desc)) {
                devres_free(ptr);
        } else {
                *ptr = desc;
                devres_add(dev, ptr);
        }

        return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
        struct spi_mem_dirmap_desc **ptr = res;

        if (WARN_ON(!ptr || !*ptr))
                return 0;

        return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *                                 to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
                                 struct spi_mem_dirmap_desc *desc)
{
        devres_release(dev, devm_spi_mem_dirmap_release,
                       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *        offset, but the offset within the direct mapping which already has
 *        its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
                            u64 offs, size_t len, void *buf)
{
        struct spi_controller *ctlr = desc->mem->spi->controller;
        ssize_t ret;

        if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
                return -EINVAL;

        if (!len)
                return 0;

        if (desc->nodirmap) {
                ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
        } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
                ret = spi_mem_access_start(desc->mem);
                if (ret)
                        return ret;

                ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

                spi_mem_access_end(desc->mem);
        } else {
                ret = -ENOTSUPP;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
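
/*
 * Example (illustrative sketch): since short reads are legal, callers loop
 * until the request is fully served. 'offs', 'len' and the u8 pointer 'buf'
 * are assumptions.
 *
 *	while (len) {
 *		ssize_t read = spi_mem_dirmap_read(desc, offs, len, buf);
 *
 *		if (read < 0)
 *			return read;
 *		if (!read)
 *			return -EIO;	// no forward progress
 *
 *		offs += read;
 *		buf += read;
 *		len -= read;
 *	}
 */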

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *        offset, but the offset within the direct mapping which already has
 *        its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
                             u64 offs, size_t len, const void *buf)
{
        struct spi_controller *ctlr = desc->mem->spi->controller;
        ssize_t ret;

        if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
                return -EINVAL;

        if (!len)
                return 0;

        if (desc->nodirmap) {
                ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
        } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
                ret = spi_mem_access_start(desc->mem);
                if (ret)
                        return ret;

                ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

                spi_mem_access_end(desc->mem);
        } else {
                ret = -ENOTSUPP;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
        return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_read_status(struct spi_mem *mem,
                               const struct spi_mem_op *op,
                               u16 *status)
{
        const u8 *bytes = (u8 *)op->data.buf.in;
        int ret;

        ret = spi_mem_exec_op(mem, op);
        if (ret)
                return ret;

        if (op->data.nbytes > 1)
                *status = ((u16)bytes[0] << 8) | bytes[1];
        else
                *status = bytes[0];

        return 0;
}

/**
 * spi_mem_poll_status() - Poll memory device status
 * @mem: SPI memory device
 * @op: the memory operation to execute
 * @mask: status bitmask to check
 * @match: (status & mask) expected value
 * @initial_delay_us: delay in us before starting to poll
 * @polling_delay_us: time to sleep between reads in us
 * @timeout_ms: timeout in milliseconds
 *
 * This function polls a status register and returns when
 * (status & mask) == match or when the timeout has expired.
 *
 * Return: 0 in case of success, -ETIMEDOUT in case of error,
 *         -EOPNOTSUPP if not supported.
 */
int spi_mem_poll_status(struct spi_mem *mem,
                        const struct spi_mem_op *op,
                        u16 mask, u16 match,
                        unsigned long initial_delay_us,
                        unsigned long polling_delay_us,
                        u16 timeout_ms)
{
        struct spi_controller *ctlr = mem->spi->controller;
        int ret = -EOPNOTSUPP;
        int read_status_ret;
        u16 status;

        if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
            op->data.dir != SPI_MEM_DATA_IN)
                return -EINVAL;

        if (ctlr->mem_ops && ctlr->mem_ops->poll_status) {
                ret = spi_mem_access_start(mem);
                if (ret)
                        return ret;

                ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
                                                 initial_delay_us, polling_delay_us,
                                                 timeout_ms);

                spi_mem_access_end(mem);
        }

        if (ret == -EOPNOTSUPP) {
                if (!spi_mem_supports_op(mem, op))
                        return ret;

                if (initial_delay_us < 10)
                        udelay(initial_delay_us);
                else
                        usleep_range((initial_delay_us >> 2) + 1,
                                     initial_delay_us);

                ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
                                        (read_status_ret || ((status) & mask) == match),
                                        polling_delay_us, timeout_ms * 1000, false, mem,
                                        op, &status);
                if (read_status_ret)
                        return read_status_ret;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
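
/*
 * Example (illustrative sketch): waiting for a NOR flash write-in-progress
 * bit (bit 0 of the 0x05 Read Status Register command) to clear. The opcode,
 * the delays and the DMA-able 'sr' buffer are assumptions for the sketch.
 *
 *	u8 *sr = kmalloc(1, GFP_KERNEL);
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(1, sr, 1));
 *
 *	if (!sr)
 *		return -ENOMEM;
 *
 *	ret = spi_mem_poll_status(mem, &op, BIT(0), 0, 0, 20, 500);
 *	kfree(sr);
 */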

static int spi_mem_probe(struct spi_device *spi)
{
        struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
        struct spi_controller *ctlr = spi->controller;
        struct spi_mem *mem;

        mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        mem->spi = spi;

        if (ctlr->mem_ops && ctlr->mem_ops->get_name)
                mem->name = ctlr->mem_ops->get_name(mem);
        else
                mem->name = dev_name(&spi->dev);

        if (IS_ERR_OR_NULL(mem->name))
                return PTR_ERR_OR_ZERO(mem->name);

        spi_set_drvdata(spi, mem);

        return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
        struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
        struct spi_mem *mem = spi_get_drvdata(spi);

        if (memdrv->remove)
                return memdrv->remove(mem);

        return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
        struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
        struct spi_mem *mem = spi_get_drvdata(spi);

        if (memdrv->shutdown)
                memdrv->shutdown(mem);
}
/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
                                       struct module *owner)
{
        memdrv->spidrv.probe = spi_mem_probe;
        memdrv->spidrv.remove = spi_mem_remove;
        memdrv->spidrv.shutdown = spi_mem_shutdown;

        return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
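
/*
 * Example (illustrative sketch): a SPI memory driver is normally registered
 * through the spi_mem_driver_register()/module_spi_mem_driver() helpers from
 * <linux/spi/spi-mem.h>, which end up here with THIS_MODULE as @owner. The
 * driver name and callbacks below are hypothetical.
 *
 *	static struct spi_mem_driver my_flash_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "my-flash",
 *			},
 *		},
 *		.probe = my_flash_probe,
 *		.remove = my_flash_remove,
 *	};
 *	module_spi_mem_driver(my_flash_driver);
 */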

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
        spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);