linux/drivers/spi/spi.c
// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
        struct spi_device       *spi = to_spi_device(dev);

        /* spi controllers may cleanup for released devices */
        if (spi->controller->cleanup)
                spi->controller->cleanup(spi);

        spi_controller_put(spi->controller);
        kfree(spi->driver_override);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *a,
                                     const char *buf, size_t count)
{
        struct spi_device *spi = to_spi_device(dev);
        const char *end = memchr(buf, '\n', count);
        const size_t len = end ? end - buf : count;
        const char *driver_override, *old;

        /* We need to keep extra room for a newline when displaying value */
        if (len >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, len, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        device_lock(dev);
        old = spi->driver_override;
        if (len) {
                spi->driver_override = driver_override;
        } else {
                /* Empty string: disable the driver override */
                spi->driver_override = NULL;
                kfree(driver_override);
        }
        device_unlock(dev);
        kfree(old);

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        ssize_t len;

        device_lock(dev);
        len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
        device_unlock(dev);
        return len;
}
static DEVICE_ATTR_RW(driver_override);
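
/*
 * Usage sketch (illustrative, not part of this file): driver_override
 * forces a specific driver to bind regardless of modalias matching.
 * Assuming a device spi0.0 and the "spidev" driver are present:
 *
 *      echo spidev > /sys/bus/spi/devices/spi0.0/driver_override
 *      echo spi0.0 > /sys/bus/spi/drivers/spidev/bind
 *
 * Writing an empty string clears the override again.
 */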

#define SPI_STATISTICS_ATTRS(field, file)                               \
static ssize_t spi_controller_##field##_show(struct device *dev,        \
                                             struct device_attribute *attr, \
                                             char *buf)                 \
{                                                                       \
        struct spi_controller *ctlr = container_of(dev,                 \
                                         struct spi_controller, dev);   \
        return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
}                                                                       \
static struct device_attribute dev_attr_spi_controller_##field = {      \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_controller_##field##_show,                          \
};                                                                      \
static ssize_t spi_device_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                         char *buf)                     \
{                                                                       \
        struct spi_device *spi = to_spi_device(dev);                    \
        return spi_statistics_##field##_show(&spi->statistics, buf);    \
}                                                                       \
static struct device_attribute dev_attr_spi_device_##field = {          \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_device_##field##_show,                              \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf)                  \
{                                                                       \
        unsigned long flags;                                            \
        ssize_t len;                                                    \
        spin_lock_irqsave(&stat->lock, flags);                          \
        len = sprintf(buf, format_string, stat->field);                 \
        spin_unlock_irqrestore(&stat->lock, flags);                     \
        return len;                                                     \
}                                                                       \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)                       \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
                                 field, format_string)
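
/*
 * For illustration (not compiled): SPI_STATISTICS_SHOW(messages, "%lu")
 * below expands, roughly, to a locked show helper plus one sysfs
 * attribute each for the controller and for the device:
 *
 *      static ssize_t spi_statistics_messages_show(struct spi_statistics *stat,
 *                                                  char *buf)
 *      { ... sprintf(buf, "%lu", stat->messages); ... }
 *      static struct device_attribute dev_attr_spi_controller_messages = ...;
 *      static struct device_attribute dev_attr_spi_device_messages = ...;
 */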

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
                                 "transfer_bytes_histo_" number,        \
                                 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        &dev_attr_spi_device_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
        &dev_attr_spi_controller_messages.attr,
        &dev_attr_spi_controller_transfers.attr,
        &dev_attr_spi_controller_errors.attr,
        &dev_attr_spi_controller_timedout.attr,
        &dev_attr_spi_controller_spi_sync.attr,
        &dev_attr_spi_controller_spi_sync_immediate.attr,
        &dev_attr_spi_controller_spi_async.attr,
        &dev_attr_spi_controller_bytes.attr,
        &dev_attr_spi_controller_bytes_rx.attr,
        &dev_attr_spi_controller_bytes_tx.attr,
        &dev_attr_spi_controller_transfer_bytes_histo0.attr,
        &dev_attr_spi_controller_transfer_bytes_histo1.attr,
        &dev_attr_spi_controller_transfer_bytes_histo2.attr,
        &dev_attr_spi_controller_transfer_bytes_histo3.attr,
        &dev_attr_spi_controller_transfer_bytes_histo4.attr,
        &dev_attr_spi_controller_transfer_bytes_histo5.attr,
        &dev_attr_spi_controller_transfer_bytes_histo6.attr,
        &dev_attr_spi_controller_transfer_bytes_histo7.attr,
        &dev_attr_spi_controller_transfer_bytes_histo8.attr,
        &dev_attr_spi_controller_transfer_bytes_histo9.attr,
        &dev_attr_spi_controller_transfer_bytes_histo10.attr,
        &dev_attr_spi_controller_transfer_bytes_histo11.attr,
        &dev_attr_spi_controller_transfer_bytes_histo12.attr,
        &dev_attr_spi_controller_transfer_bytes_histo13.attr,
        &dev_attr_spi_controller_transfer_bytes_histo14.attr,
        &dev_attr_spi_controller_transfer_bytes_histo15.attr,
        &dev_attr_spi_controller_transfer_bytes_histo16.attr,
        &dev_attr_spi_controller_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_controller_statistics_group,
        NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                       struct spi_transfer *xfer,
                                       struct spi_controller *ctlr)
{
        unsigned long flags;
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

        if (l2len < 0)
                l2len = 0;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;
        stats->transfer_bytes_histo[l2len]++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != ctlr->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != ctlr->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
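
/*
 * Illustrative note (not part of the upstream file): the statistics
 * gathered above are exported through the "statistics" attribute groups
 * registered earlier, so they can be read from sysfs. Assuming a bus
 * "spi0" with a device at chip select 0, the paths look like:
 *
 *      cat /sys/class/spi_master/spi0/statistics/bytes
 *      cat /sys/bus/spi/devices/spi0.0/statistics/transfers
 */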

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
        while (id->name[0]) {
                if (!strcmp(sdev->modalias, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Check override first, and if set, only use the named driver */
        if (spi->driver_override)
                return strcmp(spi->driver_override, drv->name) == 0;

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        struct spi_device               *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret)
                return ret;

        ret = sdrv->probe(spi);
        if (ret)
                dev_pm_domain_detach(dev, true);

        return ret;
}

static int spi_drv_remove(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = sdrv->remove(to_spi_device(dev));
        dev_pm_domain_detach(dev, true);

        return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);

        sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;
        if (sdrv->probe)
                sdrv->driver.probe = spi_drv_probe;
        if (sdrv->remove)
                sdrv->driver.remove = spi_drv_remove;
        if (sdrv->shutdown)
                sdrv->driver.shutdown = spi_drv_shutdown;
        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
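
/*
 * Example (illustrative only; the "example" names are hypothetical): a
 * minimal client driver built on the interface above. module_spi_driver()
 * wraps module_init()/module_exit() around spi_register_driver() and
 * spi_unregister_driver().
 *
 *      static int example_probe(struct spi_device *spi)
 *      {
 *              spi->mode = SPI_MODE_0;
 *              spi->bits_per_word = 8;
 *              return spi_setup(spi);
 *      }
 *
 *      static struct spi_driver example_driver = {
 *              .driver = { .name = "example" },
 *              .probe  = example_probe,
 *      };
 *      module_spi_driver(example_driver);
 */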

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c,
 * alongside other read-only (flashable) information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to protect
 * objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
        struct spi_device       *spi;

        if (!spi_controller_get(ctlr))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_controller_put(ctlr);
                return NULL;
        }

        spi->master = spi->controller = ctlr;
        spi->dev.parent = &ctlr->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->controller == new_spi->controller &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        static DEFINE_MUTEX(spi_add_lock);
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= ctlr->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
                        ctlr->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /* We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.  Lock against concurrent add() calls.
         */
        mutex_lock(&spi_add_lock);

        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
                goto done;
        }

        if (ctlr->cs_gpios)
                spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

        /* Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                                dev_name(&spi->dev), status);
                goto done;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0)
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
        else
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
        mutex_unlock(&spi_add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
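
/*
 * Usage sketch (illustrative; the values are hypothetical): the two-step
 * spi_alloc_device()/spi_add_device() pattern lets the caller fill in
 * device parameters before the device becomes visible on the bus.
 *
 *      struct spi_device *proxy = spi_alloc_device(ctlr);
 *
 *      if (!proxy)
 *              return -ENOMEM;
 *      proxy->chip_select = 1;
 *      proxy->max_speed_hz = 1000000;
 *      strlcpy(proxy->modalias, "example", sizeof(proxy->modalias));
 *      if (spi_add_device(proxy)) {
 *              spi_dev_put(proxy);     // discard without adding
 *              return -ENODEV;
 *      }
 */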

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
                                  struct spi_board_info *chip)
{
        struct spi_device       *proxy;
        int                     status;

        /* NOTE:  caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(ctlr);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        if (chip->properties) {
                status = device_add_properties(&proxy->dev, chip->properties);
                if (status) {
                        dev_err(&ctlr->dev,
                                "failed to add properties to '%s': %d\n",
                                chip->modalias, status);
                        goto err_dev_put;
                }
        }

        status = spi_add_device(proxy);
        if (status < 0)
                goto err_remove_props;

        return proxy;

err_remove_props:
        if (chip->properties)
                device_remove_properties(&proxy->dev);
err_dev_put:
        spi_dev_put(proxy);
        return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
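
/*
 * Example (illustrative; the modalias and values are hypothetical): an
 * adapter driver that discovers a chip out-of-band can describe it with
 * a spi_board_info and instantiate it directly:
 *
 *      struct spi_board_info chip = {
 *              .modalias       = "example-flash",
 *              .max_speed_hz   = 10000000,
 *              .chip_select    = 0,
 *              .mode           = SPI_MODE_3,
 *      };
 *      struct spi_device *dev = spi_new_device(ctlr, &chip);
 *
 *      if (!dev)
 *              dev_err(&ctlr->dev, "cannot instantiate device\n");
 */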

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node) {
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
                of_node_put(spi->dev.of_node);
        }
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
                                              struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (ctlr->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(ctlr, bi);
        if (!dev)
                dev_err(ctlr->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return 0;

        bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_controller *ctlr;

                memcpy(&bi->board_info, info, sizeof(*info));
                if (info->properties) {
                        bi->board_info.properties =
                                        property_entries_dup(info->properties);
                        if (IS_ERR(bi->board_info.properties))
                                return PTR_ERR(bi->board_info.properties);
                }

                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(ctlr, &spi_controller_list, list)
                        spi_match_controller_to_boardinfo(ctlr,
                                                          &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
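
/*
 * Example (illustrative; board and chip names are hypothetical): a board
 * file registering hard-wired devices during early init. The array may be
 * __initdata because the table itself is copied, but embedded pointers
 * such as platform_data are not deep-copied.
 *
 *      static struct spi_board_info board_spi_devices[] __initdata = {
 *              {
 *                      .modalias       = "example-codec",
 *                      .max_speed_hz   = 2000000,
 *                      .bus_num        = 1,
 *                      .chip_select    = 0,
 *              },
 *      };
 *
 *      spi_register_board_info(board_spi_devices,
 *                              ARRAY_SIZE(board_spi_devices));
 */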

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (gpio_is_valid(spi->cs_gpio)) {
                /* Honour the SPI_NO_CS flag */
                if (!(spi->mode & SPI_NO_CS))
                        gpio_set_value(spi->cs_gpio, !enable);
                /* Some SPI masters need both GPIO CS & slave_select */
                if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
                    spi->controller->set_cs)
                        spi->controller->set_cs(spi, !enable);
        } else if (spi->controller->set_cs) {
                spi->controller->set_cs(spi, !enable);
        }
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
                struct sg_table *sgt, void *buf, size_t len,
                enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
        const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
                                (unsigned long)buf < (PKMAP_BASE +
                                        (LAST_PKMAP * PAGE_SIZE)));
#else
        const bool kmap_buf = false;
#endif
        int desc_len;
        int sgs;
        struct page *vm_page;
        struct scatterlist *sg;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf || kmap_buf) {
                desc_len = min_t(int, max_seg_size, PAGE_SIZE);
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else if (virt_addr_valid(buf)) {
                desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
                sgs = DIV_ROUND_UP(len, desc_len);
        } else {
                return -EINVAL;
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        sg = &sgt->sgl[0];
        for (i = 0; i < sgs; i++) {

                if (vmalloced_buf || kmap_buf) {
                        /*
                         * Next scatterlist entry size is the minimum between
                         * the desc_len and the remaining buffer length that
                         * fits in a page.
                         */
                        min = min_t(size_t, desc_len,
                                    min_t(size_t, len,
                                          PAGE_SIZE - offset_in_page(buf)));
                        if (vmalloced_buf)
                                vm_page = vmalloc_to_page(buf);
                        else
                                vm_page = kmap_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(sg, vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(sg, sg_buf, min);
                }

                buf += min;
                len -= min;
                sg = sg_next(sg);
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
                   struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        ctlr->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */
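
/*
 * Sketch (illustrative; the threshold and names are hypothetical): a
 * controller driver opts into the mapping helpers above by providing a
 * can_dma() callback; the core then builds and DMA-maps scatterlists for
 * every transfer the callback accepts.
 *
 *      static bool example_can_dma(struct spi_controller *ctlr,
 *                                  struct spi_device *spi,
 *                                  struct spi_transfer *xfer)
 *      {
 *              return xfer->len > 64;  // use PIO for short transfers
 *      }
 *
 *      ctlr->can_dma = example_can_dma;
 */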

static inline int spi_unmap_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore the original value of tx_buf or rx_buf if they are
                 * NULL.
                 */
                if (xfer->tx_buf == ctlr->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == ctlr->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(ctlr->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_tx = tmp;
                        memset(tmp, 0, max_tx);
                }

                if (max_rx) {
                        tmp = krealloc(ctlr->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = ctlr->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = ctlr->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
                             struct spi_message *msg,
                             struct spi_transfer *xfer)
{
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
        unsigned long long ms = 1;

        if (spi_controller_is_slave(ctlr)) {
                if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
                        dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
                        return -EINTR;
                }
        } else {
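                /*
                 * Heuristic timeout: len bytes at speed_hz bits/s take
                 * 8 * 1000 * len / speed_hz ms on the wire; the result is
                 * then doubled, plus 200 ms, to leave slack for scheduling
                 * and chip-select delays.
                 */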
                ms = 8LL * 1000LL * xfer->len;
                do_div(ms, xfer->speed_hz);
                ms += ms + 200; /* some tolerance */

                if (ms > UINT_MAX)
                        ms = UINT_MAX;

                ms = wait_for_completion_timeout(&ctlr->xfer_completion,
                                                 msecs_to_jiffies(ms));

                if (ms == 0) {
                        SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
                        SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
                        dev_err(&msg->spi->dev,
                                "SPI transfer timed out\n");
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;

        spi_set_cs(msg->spi, true);

        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                spi_statistics_add_transfer_stats(statm, xfer, ctlr);
                spi_statistics_add_transfer_stats(stats, xfer, ctlr);

                if (xfer->tx_buf || xfer->rx_buf) {
                        reinit_completion(&ctlr->xfer_completion);

                        ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
                        if (ret < 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               errors);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               errors);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = spi_transfer_wait(ctlr, msg, xfer);
                                if (ret < 0)
                                        msg->status = ret;
                        }
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                if (xfer->delay_usecs) {
                        u16 us = xfer->delay_usecs;

                        if (us <= 10)
                                udelay(us);
                        else
                                usleep_range(us, us + DIV_ROUND_UP(us, 10));
                }

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                spi_set_cs(msg->spi, false);
                                udelay(10);
                                spi_set_cs(msg->spi, true);
                        }
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && ctlr->handle_err)
                ctlr->handle_err(ctlr, msg);

        spi_res_release(ctlr, msg);

        spi_finalize_current_message(ctlr);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
        complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
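
/*
 * Sketch (illustrative; the "example" names are hypothetical): a
 * transfer_one() implementation that starts the transfer in hardware and
 * returns a positive value asks the core to wait; the driver's interrupt
 * handler then reports completion through spi_finalize_current_transfer().
 *
 *      static int example_transfer_one(struct spi_controller *ctlr,
 *                                      struct spi_device *spi,
 *                                      struct spi_transfer *xfer)
 *      {
 *              example_hw_start(ctlr, xfer);   // kick off DMA/IRQ transfer
 *              return 1;                       // core waits for completion
 *      }
 *
 *      static irqreturn_t example_irq(int irq, void *data)
 *      {
 *              struct spi_controller *ctlr = data;
 *
 *              if (example_hw_done(ctlr))
 *                      spi_finalize_current_transfer(ctlr);
 *              return IRQ_HANDLED;
 *      }
 */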

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
1199static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1200{
1201        unsigned long flags;
1202        bool was_busy = false;
1203        int ret;
1204
1205        /* Lock queue */
1206        spin_lock_irqsave(&ctlr->queue_lock, flags);
1207
1208        /* Make sure we are not already running a message */
1209        if (ctlr->cur_msg) {
1210                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1211                return;
1212        }
1213
1214        /* If another context is idling the device then defer */
1215        if (ctlr->idling) {
1216                kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1217                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1218                return;
1219        }
1220
1221        /* Check if the queue is idle */
1222        if (list_empty(&ctlr->queue) || !ctlr->running) {
1223                if (!ctlr->busy) {
1224                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1225                        return;
1226                }
1227
1228                /* Only do teardown in the thread */
1229                if (!in_kthread) {
1230                        kthread_queue_work(&ctlr->kworker,
1231                                           &ctlr->pump_messages);
1232                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1233                        return;
1234                }
1235
1236                ctlr->busy = false;
1237                ctlr->idling = true;
1238                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1239
1240                kfree(ctlr->dummy_rx);
1241                ctlr->dummy_rx = NULL;
1242                kfree(ctlr->dummy_tx);
1243                ctlr->dummy_tx = NULL;
1244                if (ctlr->unprepare_transfer_hardware &&
1245                    ctlr->unprepare_transfer_hardware(ctlr))
1246                        dev_err(&ctlr->dev,
1247                                "failed to unprepare transfer hardware\n");
1248                if (ctlr->auto_runtime_pm) {
1249                        pm_runtime_mark_last_busy(ctlr->dev.parent);
1250                        pm_runtime_put_autosuspend(ctlr->dev.parent);
1251                }
1252                trace_spi_controller_idle(ctlr);
1253
1254                spin_lock_irqsave(&ctlr->queue_lock, flags);
1255                ctlr->idling = false;
1256                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1257                return;
1258        }
1259
1260        /* Extract head of queue */
1261        ctlr->cur_msg =
1262                list_first_entry(&ctlr->queue, struct spi_message, queue);
1263
1264        list_del_init(&ctlr->cur_msg->queue);
1265        if (ctlr->busy)
1266                was_busy = true;
1267        else
1268                ctlr->busy = true;
1269        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1270
1271        mutex_lock(&ctlr->io_mutex);
1272
1273        if (!was_busy && ctlr->auto_runtime_pm) {
1274                ret = pm_runtime_get_sync(ctlr->dev.parent);
1275                if (ret < 0) {
1276                        pm_runtime_put_noidle(ctlr->dev.parent);
1277                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
1278                                ret);
1279                        mutex_unlock(&ctlr->io_mutex);
1280                        return;
1281                }
1282        }
1283
1284        if (!was_busy)
1285                trace_spi_controller_busy(ctlr);
1286
1287        if (!was_busy && ctlr->prepare_transfer_hardware) {
1288                ret = ctlr->prepare_transfer_hardware(ctlr);
1289                if (ret) {
1290                        dev_err(&ctlr->dev,
1291                                "failed to prepare transfer hardware\n");
1292
1293                        if (ctlr->auto_runtime_pm)
1294                                pm_runtime_put(ctlr->dev.parent);
1295                        mutex_unlock(&ctlr->io_mutex);
1296                        return;
1297                }
1298        }
1299
1300        trace_spi_message_start(ctlr->cur_msg);
1301
1302        if (ctlr->prepare_message) {
1303                ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
1304                if (ret) {
1305                        dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1306                                ret);
1307                        ctlr->cur_msg->status = ret;
1308                        spi_finalize_current_message(ctlr);
1309                        goto out;
1310                }
1311                ctlr->cur_msg_prepared = true;
1312        }
1313
1314        ret = spi_map_msg(ctlr, ctlr->cur_msg);
1315        if (ret) {
1316                ctlr->cur_msg->status = ret;
1317                spi_finalize_current_message(ctlr);
1318                goto out;
1319        }
1320
1321        ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
1322        if (ret) {
1323                dev_err(&ctlr->dev,
1324                        "failed to transfer one message from queue\n");
1325                goto out;
1326        }
1327
1328out:
1329        mutex_unlock(&ctlr->io_mutex);
1330
1331        /* Prod the scheduler in case transfer_one() was busy waiting */
1332        if (!ret)
1333                cond_resched();
1334}
1335
1336/**
1337 * spi_pump_messages - kthread work function which processes spi message queue
1338 * @work: pointer to kthread work struct contained in the controller struct
1339 */
1340static void spi_pump_messages(struct kthread_work *work)
1341{
1342        struct spi_controller *ctlr =
1343                container_of(work, struct spi_controller, pump_messages);
1344
1345        __spi_pump_messages(ctlr, true);
1346}
1347
1348static int spi_init_queue(struct spi_controller *ctlr)
1349{
1350        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1351
1352        ctlr->running = false;
1353        ctlr->busy = false;
1354
1355        kthread_init_worker(&ctlr->kworker);
1356        ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
1357                                         "%s", dev_name(&ctlr->dev));
1358        if (IS_ERR(ctlr->kworker_task)) {
1359                dev_err(&ctlr->dev, "failed to create message pump task\n");
1360                return PTR_ERR(ctlr->kworker_task);
1361        }
1362        kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1363
1364        /*
1365         * Controller config will indicate if this controller should run the
1366         * message pump with high (realtime) priority to reduce the transfer
1367         * latency on the bus by minimising the delay between a transfer
1368         * request and the scheduling of the message pump thread. Without this
1369         * setting the message pump thread will remain at default priority.
1370         */
1371        if (ctlr->rt) {
1372                dev_info(&ctlr->dev,
1373                        "will run message pump with realtime priority\n");
1374                sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
1375        }
1376
1377        return 0;
1378}
1379
1380/**
1381 * spi_get_next_queued_message() - called by driver to check for queued
1382 * messages
1383 * @ctlr: the controller to check for queued messages
1384 *
1385 * If there are more messages in the queue, the next message is returned from
1386 * this call.
1387 *
1388 * Return: the next message in the queue, else NULL if the queue is empty.
1389 */
1390struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1391{
1392        struct spi_message *next;
1393        unsigned long flags;
1394
1395        /* get a pointer to the next message, if any */
1396        spin_lock_irqsave(&ctlr->queue_lock, flags);
1397        next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1398                                        queue);
1399        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1400
1401        return next;
1402}
1403EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1404
1405/**
1406 * spi_finalize_current_message() - the current message is complete
1407 * @ctlr: the controller to return the message to
1408 *
1409 * Called by the driver to notify the core that the message in the front of the
1410 * queue is complete and can be removed from the queue.
1411 */
1412void spi_finalize_current_message(struct spi_controller *ctlr)
1413{
1414        struct spi_message *mesg;
1415        unsigned long flags;
1416        int ret;
1417
1418        spin_lock_irqsave(&ctlr->queue_lock, flags);
1419        mesg = ctlr->cur_msg;
1420        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1421
1422        spi_unmap_msg(ctlr, mesg);
1423
1424        if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1425                ret = ctlr->unprepare_message(ctlr, mesg);
1426                if (ret) {
1427                        dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1428                                ret);
1429                }
1430        }
1431
1432        spin_lock_irqsave(&ctlr->queue_lock, flags);
1433        ctlr->cur_msg = NULL;
1434        ctlr->cur_msg_prepared = false;
1435        kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1436        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1437
1438        trace_spi_message_done(mesg);
1439
1440        mesg->state = NULL;
1441        if (mesg->complete)
1442                mesg->complete(mesg->context);
1443}
1444EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1445
1446static int spi_start_queue(struct spi_controller *ctlr)
1447{
1448        unsigned long flags;
1449
1450        spin_lock_irqsave(&ctlr->queue_lock, flags);
1451
1452        if (ctlr->running || ctlr->busy) {
1453                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1454                return -EBUSY;
1455        }
1456
1457        ctlr->running = true;
1458        ctlr->cur_msg = NULL;
1459        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1460
1461        kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1462
1463        return 0;
1464}
1465
1466static int spi_stop_queue(struct spi_controller *ctlr)
1467{
1468        unsigned long flags;
1469        unsigned limit = 500;
1470        int ret = 0;
1471
1472        spin_lock_irqsave(&ctlr->queue_lock, flags);
1473
1474        /*
1475         * This is a bit lame, but is optimized for the common execution path.
1476         * A wait_queue on the ctlr->busy could be used, but then the common
1477         * execution path (pump_messages) would be required to call wake_up or
1478         * friends on every SPI message. Do this instead.
1479         */
1480        while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1481                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1482                usleep_range(10000, 11000);
1483                spin_lock_irqsave(&ctlr->queue_lock, flags);
1484        }
1485
1486        if (!list_empty(&ctlr->queue) || ctlr->busy)
1487                ret = -EBUSY;
1488        else
1489                ctlr->running = false;
1490
1491        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1492
1493        if (ret)
1494                dev_warn(&ctlr->dev, "could not stop message queue\n");
1495        return ret;
1498}
1499
1500static int spi_destroy_queue(struct spi_controller *ctlr)
1501{
1502        int ret;
1503
1504        ret = spi_stop_queue(ctlr);
1505
1506        /*
1507         * kthread_flush_worker will block until all work is done.
1508         * If the reason that stop_queue timed out is that the work will never
1509         * finish, then it does no good to flush or stop the worker, so
1510         * just return.
1511         */
1512        if (ret) {
1513                dev_err(&ctlr->dev, "problem destroying queue\n");
1514                return ret;
1515        }
1516
1517        kthread_flush_worker(&ctlr->kworker);
1518        kthread_stop(ctlr->kworker_task);
1519
1520        return 0;
1521}
1522
1523static int __spi_queued_transfer(struct spi_device *spi,
1524                                 struct spi_message *msg,
1525                                 bool need_pump)
1526{
1527        struct spi_controller *ctlr = spi->controller;
1528        unsigned long flags;
1529
1530        spin_lock_irqsave(&ctlr->queue_lock, flags);
1531
1532        if (!ctlr->running) {
1533                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1534                return -ESHUTDOWN;
1535        }
1536        msg->actual_length = 0;
1537        msg->status = -EINPROGRESS;
1538
1539        list_add_tail(&msg->queue, &ctlr->queue);
1540        if (!ctlr->busy && need_pump)
1541                kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1542
1543        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1544        return 0;
1545}
1546
1547/**
1548 * spi_queued_transfer - transfer function for queued transfers
1549 * @spi: spi device which is requesting transfer
1550 * @msg: spi message to be handled; it is queued onto the driver queue
1551 *
1552 * Return: zero on success, else a negative error code.
1553 */
1554static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1555{
1556        return __spi_queued_transfer(spi, msg, true);
1557}
1558
1559static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1560{
1561        int ret;
1562
1563        ctlr->transfer = spi_queued_transfer;
1564        if (!ctlr->transfer_one_message)
1565                ctlr->transfer_one_message = spi_transfer_one_message;
1566
1567        /* Initialize and start queue */
1568        ret = spi_init_queue(ctlr);
1569        if (ret) {
1570                dev_err(&ctlr->dev, "problem initializing queue\n");
1571                goto err_init_queue;
1572        }
1573        ctlr->queued = true;
1574        ret = spi_start_queue(ctlr);
1575        if (ret) {
1576                dev_err(&ctlr->dev, "problem starting queue\n");
1577                goto err_start_queue;
1578        }
1579
1580        return 0;
1581
1582err_start_queue:
1583        spi_destroy_queue(ctlr);
1584err_init_queue:
1585        return ret;
1586}
1587
1588/**
1589 * spi_flush_queue - Send all pending messages in the queue from the caller's
1590 *                   context
1591 * @ctlr: controller to process queue for
1592 *
1593 * This should be used when one wants to ensure all pending messages have been
1594 * sent before doing something. It is used by the spi-mem code to make sure SPI
1595 * memory operations do not preempt regular SPI transfers that have been queued
1596 * before the spi-mem operation.
1597 */
1598void spi_flush_queue(struct spi_controller *ctlr)
1599{
1600        if (ctlr->transfer == spi_queued_transfer)
1601                __spi_pump_messages(ctlr, false);
1602}
1603
1604/*-------------------------------------------------------------------------*/
1605
1606#if defined(CONFIG_OF)
1607static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1608                           struct device_node *nc)
1609{
1610        u32 value;
1611        int rc;
1612
1613        /* Mode (clock phase/polarity/etc.) */
1614        if (of_property_read_bool(nc, "spi-cpha"))
1615                spi->mode |= SPI_CPHA;
1616        if (of_property_read_bool(nc, "spi-cpol"))
1617                spi->mode |= SPI_CPOL;
1618        if (of_property_read_bool(nc, "spi-cs-high"))
1619                spi->mode |= SPI_CS_HIGH;
1620        if (of_property_read_bool(nc, "spi-3wire"))
1621                spi->mode |= SPI_3WIRE;
1622        if (of_property_read_bool(nc, "spi-lsb-first"))
1623                spi->mode |= SPI_LSB_FIRST;
1624
1625        /* Device DUAL/QUAD mode */
1626        if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1627                switch (value) {
1628                case 1:
1629                        break;
1630                case 2:
1631                        spi->mode |= SPI_TX_DUAL;
1632                        break;
1633                case 4:
1634                        spi->mode |= SPI_TX_QUAD;
1635                        break;
1636                case 8:
1637                        spi->mode |= SPI_TX_OCTAL;
1638                        break;
1639                default:
1640                        dev_warn(&ctlr->dev,
1641                                "spi-tx-bus-width %d not supported\n",
1642                                value);
1643                        break;
1644                }
1645        }
1646
1647        if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1648                switch (value) {
1649                case 1:
1650                        break;
1651                case 2:
1652                        spi->mode |= SPI_RX_DUAL;
1653                        break;
1654                case 4:
1655                        spi->mode |= SPI_RX_QUAD;
1656                        break;
1657                case 8:
1658                        spi->mode |= SPI_RX_OCTAL;
1659                        break;
1660                default:
1661                        dev_warn(&ctlr->dev,
1662                                "spi-rx-bus-width %d not supported\n",
1663                                value);
1664                        break;
1665                }
1666        }
1667
1668        if (spi_controller_is_slave(ctlr)) {
1669                if (!of_node_name_eq(nc, "slave")) {
1670                        dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1671                                nc);
1672                        return -EINVAL;
1673                }
1674                return 0;
1675        }
1676
1677        /* Device address */
1678        rc = of_property_read_u32(nc, "reg", &value);
1679        if (rc) {
1680                dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
1681                        nc, rc);
1682                return rc;
1683        }
1684        spi->chip_select = value;
1685
1686        /* Device speed */
1687        rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1688        if (rc) {
1689                dev_err(&ctlr->dev,
1690                        "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
1691                return rc;
1692        }
1693        spi->max_speed_hz = value;
1694
1695        return 0;
1696}
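
/*
 * For illustration only, a device tree child node that the parser above
 * accepts could look like this (all values hypothetical):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;			// chip select 0
 *		spi-max-frequency = <25000000>;
 *		spi-cpha;			// sets SPI_CPHA
 *		spi-tx-bus-width = <4>;		// sets SPI_TX_QUAD
 *	};
 */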
1697
1698static struct spi_device *
1699of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
1700{
1701        struct spi_device *spi;
1702        int rc;
1703
1704        /* Alloc an spi_device */
1705        spi = spi_alloc_device(ctlr);
1706        if (!spi) {
1707                dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
1708                rc = -ENOMEM;
1709                goto err_out;
1710        }
1711
1712        /* Select device driver */
1713        rc = of_modalias_node(nc, spi->modalias,
1714                                sizeof(spi->modalias));
1715        if (rc < 0) {
1716                dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
1717                goto err_out;
1718        }
1719
1720        rc = of_spi_parse_dt(ctlr, spi, nc);
1721        if (rc)
1722                goto err_out;
1723
1724        /* Store a pointer to the node in the device structure */
1725        of_node_get(nc);
1726        spi->dev.of_node = nc;
1727
1728        /* Register the new device */
1729        rc = spi_add_device(spi);
1730        if (rc) {
1731                dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
1732                goto err_of_node_put;
1733        }
1734
1735        return spi;
1736
1737err_of_node_put:
1738        of_node_put(nc);
1739err_out:
1740        spi_dev_put(spi);
1741        return ERR_PTR(rc);
1742}
1743
1744/**
1745 * of_register_spi_devices() - Register child devices onto the SPI bus
1746 * @ctlr:       Pointer to spi_controller device
1747 *
1748 * Registers an spi_device for each child node of the controller node which
1749 * represents a valid SPI slave.
1750 */
1751static void of_register_spi_devices(struct spi_controller *ctlr)
1752{
1753        struct spi_device *spi;
1754        struct device_node *nc;
1755
1756        if (!ctlr->dev.of_node)
1757                return;
1758
1759        for_each_available_child_of_node(ctlr->dev.of_node, nc) {
1760                if (of_node_test_and_set_flag(nc, OF_POPULATED))
1761                        continue;
1762                spi = of_register_spi_device(ctlr, nc);
1763                if (IS_ERR(spi)) {
1764                        dev_warn(&ctlr->dev,
1765                                 "Failed to create SPI device for %pOF\n", nc);
1766                        of_node_clear_flag(nc, OF_POPULATED);
1767                }
1768        }
1769}
1770#else
1771static void of_register_spi_devices(struct spi_controller *ctlr) { }
1772#endif
1773
1774#ifdef CONFIG_ACPI
1775static void acpi_spi_parse_apple_properties(struct spi_device *spi)
1776{
1777        struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
1778        const union acpi_object *obj;
1779
1780        if (!x86_apple_machine)
1781                return;
1782
1783        if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
1784            && obj->buffer.length >= 4)
1785                spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
1786
1787        if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
1788            && obj->buffer.length == 8)
1789                spi->bits_per_word = *(u64 *)obj->buffer.pointer;
1790
1791        if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
1792            && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
1793                spi->mode |= SPI_LSB_FIRST;
1794
1795        if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
1796            && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1797                spi->mode |= SPI_CPOL;
1798
1799        if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
1800            && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1801                spi->mode |= SPI_CPHA;
1802}
1803
1804static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1805{
1806        struct spi_device *spi = data;
1807        struct spi_controller *ctlr = spi->controller;
1808
1809        if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1810                struct acpi_resource_spi_serialbus *sb;
1811
1812                sb = &ares->data.spi_serial_bus;
1813                if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1814                        /*
1815                         * ACPI DeviceSelection numbering is handled by the
1816                         * host controller driver in Windows and can vary
1817                         * from driver to driver. In Linux we always expect
1818                         * 0 .. max - 1 so we need to ask the driver to
1819                         * translate between the two schemes.
1820                         */
1821                        if (ctlr->fw_translate_cs) {
1822                                int cs = ctlr->fw_translate_cs(ctlr,
1823                                                sb->device_selection);
1824                                if (cs < 0)
1825                                        return cs;
1826                                spi->chip_select = cs;
1827                        } else {
1828                                spi->chip_select = sb->device_selection;
1829                        }
1830
1831                        spi->max_speed_hz = sb->connection_speed;
1832
1833                        if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1834                                spi->mode |= SPI_CPHA;
1835                        if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1836                                spi->mode |= SPI_CPOL;
1837                        if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1838                                spi->mode |= SPI_CS_HIGH;
1839                }
1840        } else if (spi->irq < 0) {
1841                struct resource r;
1842
1843                if (acpi_dev_resource_interrupt(ares, 0, &r))
1844                        spi->irq = r.start;
1845        }
1846
1847        /* Always tell the ACPI core to skip this resource */
1848        return 1;
1849}
1850
1851static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
1852                                            struct acpi_device *adev)
1853{
1854        struct list_head resource_list;
1855        struct spi_device *spi;
1856        int ret;
1857
1858        if (acpi_bus_get_status(adev) || !adev->status.present ||
1859            acpi_device_enumerated(adev))
1860                return AE_OK;
1861
1862        spi = spi_alloc_device(ctlr);
1863        if (!spi) {
1864                dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
1865                        dev_name(&adev->dev));
1866                return AE_NO_MEMORY;
1867        }
1868
1869        ACPI_COMPANION_SET(&spi->dev, adev);
1870        spi->irq = -1;
1871
1872        INIT_LIST_HEAD(&resource_list);
1873        ret = acpi_dev_get_resources(adev, &resource_list,
1874                                     acpi_spi_add_resource, spi);
1875        acpi_dev_free_resource_list(&resource_list);
1876
1877        acpi_spi_parse_apple_properties(spi);
1878
1879        if (ret < 0 || !spi->max_speed_hz) {
1880                spi_dev_put(spi);
1881                return AE_OK;
1882        }
1883
1884        acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
1885                          sizeof(spi->modalias));
1886
1887        if (spi->irq < 0)
1888                spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1889
1890        acpi_device_set_enumerated(adev);
1891
1892        adev->power.flags.ignore_parent = true;
1893        if (spi_add_device(spi)) {
1894                adev->power.flags.ignore_parent = false;
1895                dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
1896                        dev_name(&adev->dev));
1897                spi_dev_put(spi);
1898        }
1899
1900        return AE_OK;
1901}
1902
1903static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1904                                       void *data, void **return_value)
1905{
1906        struct spi_controller *ctlr = data;
1907        struct acpi_device *adev;
1908
1909        if (acpi_bus_get_device(handle, &adev))
1910                return AE_OK;
1911
1912        return acpi_register_spi_device(ctlr, adev);
1913}
1914
1915static void acpi_register_spi_devices(struct spi_controller *ctlr)
1916{
1917        acpi_status status;
1918        acpi_handle handle;
1919
1920        handle = ACPI_HANDLE(ctlr->dev.parent);
1921        if (!handle)
1922                return;
1923
1924        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1925                                     acpi_spi_add_device, NULL, ctlr, NULL);
1926        if (ACPI_FAILURE(status))
1927                dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
1928}
1929#else
1930static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
1931#endif /* CONFIG_ACPI */
1932
1933static void spi_controller_release(struct device *dev)
1934{
1935        struct spi_controller *ctlr;
1936
1937        ctlr = container_of(dev, struct spi_controller, dev);
1938        kfree(ctlr);
1939}
1940
1941static struct class spi_master_class = {
1942        .name           = "spi_master",
1943        .owner          = THIS_MODULE,
1944        .dev_release    = spi_controller_release,
1945        .dev_groups     = spi_master_groups,
1946};
1947
1948#ifdef CONFIG_SPI_SLAVE
1949/**
1950 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
1951 *                   controller
1952 * @spi: device used for the current transfer
1953 */
1954int spi_slave_abort(struct spi_device *spi)
1955{
1956        struct spi_controller *ctlr = spi->controller;
1957
1958        if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
1959                return ctlr->slave_abort(ctlr);
1960
1961        return -ENOTSUPP;
1962}
1963EXPORT_SYMBOL_GPL(spi_slave_abort);
1964
1965static int match_true(struct device *dev, void *data)
1966{
1967        return 1;
1968}
1969
1970static ssize_t spi_slave_show(struct device *dev,
1971                              struct device_attribute *attr, char *buf)
1972{
1973        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1974                                                   dev);
1975        struct device *child;
1976
1977        child = device_find_child(&ctlr->dev, NULL, match_true);
1978        return sprintf(buf, "%s\n",
1979                       child ? to_spi_device(child)->modalias : NULL);
1980}
1981
1982static ssize_t spi_slave_store(struct device *dev,
1983                               struct device_attribute *attr, const char *buf,
1984                               size_t count)
1985{
1986        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1987                                                   dev);
1988        struct spi_device *spi;
1989        struct device *child;
1990        char name[32];
1991        int rc;
1992
1993        rc = sscanf(buf, "%31s", name);
1994        if (rc != 1 || !name[0])
1995                return -EINVAL;
1996
1997        child = device_find_child(&ctlr->dev, NULL, match_true);
1998        if (child) {
1999                /* Remove registered slave */
2000                device_unregister(child);
2001                put_device(child);
2002        }
2003
2004        if (strcmp(name, "(null)")) {
2005                /* Register new slave */
2006                spi = spi_alloc_device(ctlr);
2007                if (!spi)
2008                        return -ENOMEM;
2009
2010                strlcpy(spi->modalias, name, sizeof(spi->modalias));
2011
2012                rc = spi_add_device(spi);
2013                if (rc) {
2014                        spi_dev_put(spi);
2015                        return rc;
2016                }
2017        }
2018
2019        return count;
2020}
2021
2022static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
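
/*
 * From userspace the slave protocol handler is bound and unbound through
 * this attribute, e.g. (illustrative; "spi-slave-time" is one of the
 * sample handlers shipped with the kernel):
 *
 *	echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	echo '(null)'       > /sys/class/spi_slave/spi0/slave
 */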
2023
2024static struct attribute *spi_slave_attrs[] = {
2025        &dev_attr_slave.attr,
2026        NULL,
2027};
2028
2029static const struct attribute_group spi_slave_group = {
2030        .attrs = spi_slave_attrs,
2031};
2032
2033static const struct attribute_group *spi_slave_groups[] = {
2034        &spi_controller_statistics_group,
2035        &spi_slave_group,
2036        NULL,
2037};
2038
2039static struct class spi_slave_class = {
2040        .name           = "spi_slave",
2041        .owner          = THIS_MODULE,
2042        .dev_release    = spi_controller_release,
2043        .dev_groups     = spi_slave_groups,
2044};
2045#else
2046extern struct class spi_slave_class;    /* dummy */
2047#endif
2048
2049/**
2050 * __spi_alloc_controller - allocate an SPI master or slave controller
2051 * @dev: the parent device for the controller, often on the platform_bus
2052 * @size: how much zeroed driver-private data to allocate; the pointer to this
2053 *      memory is in the driver_data field of the returned device,
2054 *      accessible with spi_controller_get_devdata().
2055 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2056 *      slave (true) controller
2057 * Context: can sleep
2058 *
2059 * This call is used only by SPI controller drivers, which are the
2060 * only ones directly touching chip registers.  It's how they allocate
2061 * an spi_controller structure, prior to calling spi_register_controller().
2062 *
2063 * This must be called from context that can sleep.
2064 *
2065 * The caller is responsible for assigning the bus number and initializing the
2066 * controller's methods before calling spi_register_controller(); and (after
2067 * errors adding the device) calling spi_controller_put() to prevent a memory
2068 * leak.
2069 *
2070 * Return: the SPI controller structure on success, else NULL.
2071 */
2072struct spi_controller *__spi_alloc_controller(struct device *dev,
2073                                              unsigned int size, bool slave)
2074{
2075        struct spi_controller   *ctlr;
2076
2077        if (!dev)
2078                return NULL;
2079
2080        ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
2081        if (!ctlr)
2082                return NULL;
2083
2084        device_initialize(&ctlr->dev);
2085        ctlr->bus_num = -1;
2086        ctlr->num_chipselect = 1;
2087        ctlr->slave = slave;
2088        if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2089                ctlr->dev.class = &spi_slave_class;
2090        else
2091                ctlr->dev.class = &spi_master_class;
2092        ctlr->dev.parent = dev;
2093        pm_suspend_ignore_children(&ctlr->dev, true);
2094        spi_controller_set_devdata(ctlr, &ctlr[1]);
2095
2096        return ctlr;
2097}
2098EXPORT_SYMBOL_GPL(__spi_alloc_controller);
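
/*
 * Illustrative sketch (hypothetical driver, hypothetical struct
 * example_priv): drivers usually call this through the spi_alloc_master()
 * or spi_alloc_slave() wrappers and size the allocation for their private
 * state, which then lives directly behind the controller structure:
 */
static __maybe_unused struct spi_controller *example_alloc(struct device *dev)
{
	struct example_priv { void __iomem *regs; } *priv;
	struct spi_controller *ctlr;

	ctlr = spi_alloc_master(dev, sizeof(*priv));
	if (!ctlr)
		return NULL;

	priv = spi_controller_get_devdata(ctlr);	/* the zeroed ctlr[1] area */
	(void)priv;		/* fill in priv->regs etc. before registering */
	return ctlr;
}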
2099
2100#ifdef CONFIG_OF
2101static int of_spi_register_master(struct spi_controller *ctlr)
2102{
2103        int nb, i, *cs;
2104        struct device_node *np = ctlr->dev.of_node;
2105
2106        if (!np)
2107                return 0;
2108
2109        nb = of_gpio_named_count(np, "cs-gpios");
2110        ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2111
2112        /* Return error only for an incorrectly formed cs-gpios property */
2113        if (nb == 0 || nb == -ENOENT)
2114                return 0;
2115        else if (nb < 0)
2116                return nb;
2117
2118        cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2119                          GFP_KERNEL);
2120        ctlr->cs_gpios = cs;
2121
2122        if (!ctlr->cs_gpios)
2123                return -ENOMEM;
2124
2125        for (i = 0; i < ctlr->num_chipselect; i++)
2126                cs[i] = -ENOENT;
2127
2128        for (i = 0; i < nb; i++)
2129                cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2130
2131        return 0;
2132}
2133#else
2134static int of_spi_register_master(struct spi_controller *ctlr)
2135{
2136        return 0;
2137}
2138#endif
2139
2140static int spi_controller_check_ops(struct spi_controller *ctlr)
2141{
2142        /*
2143         * The controller may implement only the high-level SPI-memory like
2144         * operations if it does not support regular SPI transfers, and this is
2145         * a valid use case.
2146         * If ->mem_ops is NULL, we request that at least one of the
2147         * ->transfer_xxx() methods be implemented.
2148         */
2149        if (ctlr->mem_ops) {
2150                if (!ctlr->mem_ops->exec_op)
2151                        return -EINVAL;
2152        } else if (!ctlr->transfer && !ctlr->transfer_one &&
2153                   !ctlr->transfer_one_message) {
2154                return -EINVAL;
2155        }
2156
2157        return 0;
2158}
2159
2160/**
2161 * spi_register_controller - register SPI master or slave controller
2162 * @ctlr: initialized master, originally from spi_alloc_master() or
2163 *      spi_alloc_slave()
2164 * Context: can sleep
2165 *
2166 * SPI controllers connect to their drivers using some non-SPI bus,
2167 * such as the platform bus.  The final stage of probe() in that code
2168 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2169 *
2170 * SPI controllers use board specific (often SOC specific) bus numbers,
2171 * and board-specific addressing for SPI devices combines those numbers
2172 * with chip select numbers.  Since SPI does not directly support dynamic
2173 * device identification, boards need configuration tables telling which
2174 * chip is at which address.
2175 *
2176 * This must be called from context that can sleep.  It returns zero on
2177 * success, else a negative error code (dropping the controller's refcount).
2178 * After a successful return, the caller is responsible for calling
2179 * spi_unregister_controller().
2180 *
2181 * Return: zero on success, else a negative error code.
2182 */
2183int spi_register_controller(struct spi_controller *ctlr)
2184{
2185        struct device           *dev = ctlr->dev.parent;
2186        struct boardinfo        *bi;
2187        int                     status = -ENODEV;
2188        int                     id, first_dynamic;
2189
2190        if (!dev)
2191                return -ENODEV;
2192
2193        /*
2194         * Make sure all necessary hooks are implemented before registering
2195         * the SPI controller.
2196         */
2197        status = spi_controller_check_ops(ctlr);
2198        if (status)
2199                return status;
2200
2201        if (!spi_controller_is_slave(ctlr)) {
2202                status = of_spi_register_master(ctlr);
2203                if (status)
2204                        return status;
2205        }
2206
2207        /* even if it's just one always-selected device, there must
2208         * be at least one chipselect
2209         */
2210        if (ctlr->num_chipselect == 0)
2211                return -EINVAL;
2212        if (ctlr->bus_num >= 0) {
2213                /* devices with a fixed bus num must check in with the num */
2214                mutex_lock(&board_lock);
2215                id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2216                        ctlr->bus_num + 1, GFP_KERNEL);
2217                mutex_unlock(&board_lock);
2218                if (WARN(id < 0, "couldn't get idr"))
2219                        return id == -ENOSPC ? -EBUSY : id;
2220                ctlr->bus_num = id;
2221        } else if (ctlr->dev.of_node) {
2222                /* allocate dynamic bus number using Linux idr */
2223                id = of_alias_get_id(ctlr->dev.of_node, "spi");
2224                if (id >= 0) {
2225                        ctlr->bus_num = id;
2226                        mutex_lock(&board_lock);
2227                        id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2228                                       ctlr->bus_num + 1, GFP_KERNEL);
2229                        mutex_unlock(&board_lock);
2230                        if (WARN(id < 0, "couldn't get idr"))
2231                                return id == -ENOSPC ? -EBUSY : id;
2232                }
2233        }
2234        if (ctlr->bus_num < 0) {
2235                first_dynamic = of_alias_get_highest_id("spi");
2236                if (first_dynamic < 0)
2237                        first_dynamic = 0;
2238                else
2239                        first_dynamic++;
2240
2241                mutex_lock(&board_lock);
2242                id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2243                               0, GFP_KERNEL);
2244                mutex_unlock(&board_lock);
2245                if (WARN(id < 0, "couldn't get idr"))
2246                        return id;
2247                ctlr->bus_num = id;
2248        }
2249        INIT_LIST_HEAD(&ctlr->queue);
2250        spin_lock_init(&ctlr->queue_lock);
2251        spin_lock_init(&ctlr->bus_lock_spinlock);
2252        mutex_init(&ctlr->bus_lock_mutex);
2253        mutex_init(&ctlr->io_mutex);
2254        ctlr->bus_lock_flag = 0;
2255        init_completion(&ctlr->xfer_completion);
2256        if (!ctlr->max_dma_len)
2257                ctlr->max_dma_len = INT_MAX;
2258
2259        /* register the device, then userspace will see it.
2260         * registration fails if the bus ID is in use.
2261         */
2262        dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2263        status = device_add(&ctlr->dev);
2264        if (status < 0) {
2265                /* free bus id */
2266                mutex_lock(&board_lock);
2267                idr_remove(&spi_master_idr, ctlr->bus_num);
2268                mutex_unlock(&board_lock);
2269                goto done;
2270        }
2271        dev_dbg(dev, "registered %s %s\n",
2272                        spi_controller_is_slave(ctlr) ? "slave" : "master",
2273                        dev_name(&ctlr->dev));
2274
2275        /*
2276         * If we're using a queued driver, start the queue. Note that we don't
2277         * need the queueing logic if the driver is only supporting high-level
2278         * memory operations.
2279         */
2280        if (ctlr->transfer) {
2281                dev_info(dev, "controller is unqueued, this is deprecated\n");
2282        } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2283                status = spi_controller_initialize_queue(ctlr);
2284                if (status) {
2285                        device_del(&ctlr->dev);
2286                        /* free bus id */
2287                        mutex_lock(&board_lock);
2288                        idr_remove(&spi_master_idr, ctlr->bus_num);
2289                        mutex_unlock(&board_lock);
2290                        goto done;
2291                }
2292        }
2293        /* add statistics */
2294        spin_lock_init(&ctlr->statistics.lock);
2295
2296        mutex_lock(&board_lock);
2297        list_add_tail(&ctlr->list, &spi_controller_list);
2298        list_for_each_entry(bi, &board_list, list)
2299                spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2300        mutex_unlock(&board_lock);
2301
2302        /* Register devices from the device tree and ACPI */
2303        of_register_spi_devices(ctlr);
2304        acpi_register_spi_devices(ctlr);
2305done:
2306        return status;
2307}
2308EXPORT_SYMBOL_GPL(spi_register_controller);
2309
2310static void devm_spi_unregister(struct device *dev, void *res)
2311{
2312        spi_unregister_controller(*(struct spi_controller **)res);
2313}
2314
2315/**
2316 * devm_spi_register_controller - register managed SPI master or slave
2317 *      controller
2318 * @dev:    device managing SPI controller
2319 * @ctlr: initialized controller, originally from spi_alloc_master() or
2320 *      spi_alloc_slave()
2321 * Context: can sleep
2322 *
2323 * Register an SPI controller as with spi_register_controller(); the
2324 * controller will automatically be unregistered and freed.
2325 *
2326 * Return: zero on success, else a negative error code.
2327 */
2328int devm_spi_register_controller(struct device *dev,
2329                                 struct spi_controller *ctlr)
2330{
2331        struct spi_controller **ptr;
2332        int ret;
2333
2334        ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2335        if (!ptr)
2336                return -ENOMEM;
2337
2338        ret = spi_register_controller(ctlr);
2339        if (!ret) {
2340                *ptr = ctlr;
2341                devres_add(dev, ptr);
2342        } else {
2343                devres_free(ptr);
2344        }
2345
2346        return ret;
2347}
2348EXPORT_SYMBOL_GPL(devm_spi_register_controller);
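
/*
 * Illustrative registration flow (hypothetical driver code; the commented
 * hook is a placeholder for whatever the real driver implements, see
 * spi_controller_check_ops()):
 */
static int __maybe_unused example_register(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = spi_alloc_master(dev, 0);
	if (!ctlr)
		return -ENOMEM;

	ctlr->bus_num = -1;			/* request a dynamic bus number */
	ctlr->num_chipselect = 2;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	/* ctlr->transfer_one = example_transfer_one; -- required hook */

	/* on success the controller is unregistered when @dev is unbound */
	return devm_spi_register_controller(dev, ctlr);
}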
2349
2350static int __unregister(struct device *dev, void *null)
2351{
2352        spi_unregister_device(to_spi_device(dev));
2353        return 0;
2354}
2355
2356/**
2357 * spi_unregister_controller - unregister SPI master or slave controller
2358 * @ctlr: the controller being unregistered
2359 * Context: can sleep
2360 *
2361 * This call is used only by SPI controller drivers, which are the
2362 * only ones directly touching chip registers.
2363 *
2364 * This must be called from context that can sleep.
2365 *
2366 * Note that this function also drops a reference to the controller.
2367 */
2368void spi_unregister_controller(struct spi_controller *ctlr)
2369{
2370        struct spi_controller *found;
2371        int id = ctlr->bus_num;
2372        int dummy;
2373
2374        /* First make sure that this controller was ever added */
2375        mutex_lock(&board_lock);
2376        found = idr_find(&spi_master_idr, id);
2377        mutex_unlock(&board_lock);
2378        if (ctlr->queued) {
2379                if (spi_destroy_queue(ctlr))
2380                        dev_err(&ctlr->dev, "queue remove failed\n");
2381        }
2382        mutex_lock(&board_lock);
2383        list_del(&ctlr->list);
2384        mutex_unlock(&board_lock);
2385
2386        dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
2387        device_unregister(&ctlr->dev);
2388        /* free bus id */
2389        mutex_lock(&board_lock);
2390        if (found == ctlr)
2391                idr_remove(&spi_master_idr, id);
2392        mutex_unlock(&board_lock);
2393}
2394EXPORT_SYMBOL_GPL(spi_unregister_controller);
2395
2396int spi_controller_suspend(struct spi_controller *ctlr)
2397{
2398        int ret;
2399
2400        /* Basically no-ops for non-queued controllers */
2401        if (!ctlr->queued)
2402                return 0;
2403
2404        ret = spi_stop_queue(ctlr);
2405        if (ret)
2406                dev_err(&ctlr->dev, "queue stop failed\n");
2407
2408        return ret;
2409}
2410EXPORT_SYMBOL_GPL(spi_controller_suspend);
2411
2412int spi_controller_resume(struct spi_controller *ctlr)
2413{
2414        int ret;
2415
2416        if (!ctlr->queued)
2417                return 0;
2418
2419        ret = spi_start_queue(ctlr);
2420        if (ret)
2421                dev_err(&ctlr->dev, "queue restart failed\n");
2422
2423        return ret;
2424}
2425EXPORT_SYMBOL_GPL(spi_controller_resume);
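
/*
 * Illustrative sketch of wiring the two helpers above into a driver's PM
 * callbacks (hypothetical; assumes the driver saved the controller with
 * dev_set_drvdata() at probe time):
 */
static int __maybe_unused example_pm_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	return spi_controller_suspend(ctlr);
}

static int __maybe_unused example_pm_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	return spi_controller_resume(ctlr);
}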
2426
2427static int __spi_controller_match(struct device *dev, const void *data)
2428{
2429        struct spi_controller *ctlr;
2430        const u16 *bus_num = data;
2431
2432        ctlr = container_of(dev, struct spi_controller, dev);
2433        return ctlr->bus_num == *bus_num;
2434}
2435
2436/**
2437 * spi_busnum_to_master - look up master associated with bus_num
2438 * @bus_num: the master's bus number
2439 * Context: can sleep
2440 *
2441 * This call may be used with devices that are registered after
2442 * arch init time.  It returns a refcounted pointer to the relevant
2443 * spi_controller (which the caller must release), or NULL if there is
2444 * no such master registered.
2445 *
2446 * Return: the SPI master structure on success, else NULL.
2447 */
2448struct spi_controller *spi_busnum_to_master(u16 bus_num)
2449{
2450        struct device           *dev;
2451        struct spi_controller   *ctlr = NULL;
2452
2453        dev = class_find_device(&spi_master_class, NULL, &bus_num,
2454                                __spi_controller_match);
2455        if (dev)
2456                ctlr = container_of(dev, struct spi_controller, dev);
2457        /* reference was taken in class_find_device() */
2458        return ctlr;
2459}
2460EXPORT_SYMBOL_GPL(spi_busnum_to_master);
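
/*
 * Illustrative lookup (hypothetical bus number); the reference taken by
 * class_find_device() must be dropped with spi_controller_put() when the
 * caller is done with the controller:
 */
static void __maybe_unused example_busnum_lookup(void)
{
	struct spi_controller *ctlr = spi_busnum_to_master(0);

	if (ctlr) {
		/* ... use ctlr ... */
		spi_controller_put(ctlr);
	}
}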
2461
2462/*-------------------------------------------------------------------------*/
2463
2464/* Core methods for SPI resource management */
2465
2466/**
2467 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2468 *                 during the processing of a spi_message while using
2469 *                 spi_transfer_one
2470 * @spi:     the spi device for which we allocate memory
2471 * @release: the release code to execute for this resource
2472 * @size:    size to alloc and return
2473 * @gfp:     GFP allocation flags
2474 *
2475 * Return: the pointer to the allocated data
2476 *
2477 * This may get enhanced in the future to allocate from a memory pool
2478 * of the @spi_device or @spi_controller to avoid repeated allocations.
2479 */
2480void *spi_res_alloc(struct spi_device *spi,
2481                    spi_res_release_t release,
2482                    size_t size, gfp_t gfp)
2483{
2484        struct spi_res *sres;
2485
2486        sres = kzalloc(sizeof(*sres) + size, gfp);
2487        if (!sres)
2488                return NULL;
2489
2490        INIT_LIST_HEAD(&sres->entry);
2491        sres->release = release;
2492
2493        return sres->data;
2494}
2495EXPORT_SYMBOL_GPL(spi_res_alloc);
2496
2497/**
2498 * spi_res_free - free an spi resource
2499 * @res: pointer to the custom data of a resource
2501 */
2502void spi_res_free(void *res)
2503{
2504        struct spi_res *sres = container_of(res, struct spi_res, data);
2505
2506        if (!res)
2507                return;
2508
2509        WARN_ON(!list_empty(&sres->entry));
2510        kfree(sres);
2511}
2512EXPORT_SYMBOL_GPL(spi_res_free);
2513
2514/**
2515 * spi_res_add - add a spi_res to the spi_message
2516 * @message: the spi message
2517 * @res:     the spi_resource
2518 */
2519void spi_res_add(struct spi_message *message, void *res)
2520{
2521        struct spi_res *sres = container_of(res, struct spi_res, data);
2522
2523        WARN_ON(!list_empty(&sres->entry));
2524        list_add_tail(&sres->entry, &message->resources);
2525}
2526EXPORT_SYMBOL_GPL(spi_res_add);
2527
2528/**
2529 * spi_res_release - release all spi resources for this message
2530 * @ctlr:  the @spi_controller
2531 * @message: the @spi_message
2532 */
2533void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2534{
2535        struct spi_res *res;
2536
2537        while (!list_empty(&message->resources)) {
2538                res = list_last_entry(&message->resources,
2539                                      struct spi_res, entry);
2540
2541                if (res->release)
2542                        res->release(ctlr, message, res->data);
2543
2544                list_del(&res->entry);
2545
2546                kfree(res);
2547        }
2548}
2549EXPORT_SYMBOL_GPL(spi_res_release);
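
/*
 * Illustrative sketch of the alloc/add pattern (hypothetical payload and
 * release callback): the resource is freed, after the optional release
 * callback runs, when spi_res_release() processes the finished message.
 */
static void __maybe_unused example_res_release(struct spi_controller *ctlr,
					       struct spi_message *msg,
					       void *res)
{
	/* undo whatever the resource payload represents */
}

static int __maybe_unused example_attach_res(struct spi_message *msg)
{
	struct example_res { int token; } *er;

	er = spi_res_alloc(msg->spi, example_res_release,
			   sizeof(*er), GFP_KERNEL);
	if (!er)
		return -ENOMEM;

	spi_res_add(msg, er);
	return 0;
}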
2550
2551/*-------------------------------------------------------------------------*/
2552
2553/* Core methods for spi_message alterations */
2554
2555static void __spi_replace_transfers_release(struct spi_controller *ctlr,
2556                                            struct spi_message *msg,
2557                                            void *res)
2558{
2559        struct spi_replaced_transfers *rxfer = res;
2560        size_t i;
2561
2562        /* call extra callback if requested */
2563        if (rxfer->release)
2564                rxfer->release(ctlr, msg, res);
2565
2566        /* insert replaced transfers back into the message */
2567        list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2568
2569        /* remove the formerly inserted entries */
2570        for (i = 0; i < rxfer->inserted; i++)
2571                list_del(&rxfer->inserted_transfers[i].transfer_list);
2572}
2573
2574/**
2575 * spi_replace_transfers - replace transfers with several transfers
2576 *                         and register change with spi_message.resources
2577 * @msg:           the spi_message we work upon
2578 * @xfer_first:    the first spi_transfer we want to replace
2579 * @remove:        number of transfers to remove
2580 * @insert:        the number of transfers we want to insert instead
2581 * @release:       extra release code necessary in some circumstances
2582 * @extradatasize: extra data to allocate (with alignment guarantees
2583 *                 of struct @spi_transfer)
2584 * @gfp:           gfp flags
2585 *
2586 * Return: pointer to @spi_replaced_transfers,
2587 *         ERR_PTR(...) in case of errors.
2588 */
2589struct spi_replaced_transfers *spi_replace_transfers(
2590        struct spi_message *msg,
2591        struct spi_transfer *xfer_first,
2592        size_t remove,
2593        size_t insert,
2594        spi_replaced_release_t release,
2595        size_t extradatasize,
2596        gfp_t gfp)
2597{
2598        struct spi_replaced_transfers *rxfer;
2599        struct spi_transfer *xfer;
2600        size_t i;
2601
2602        /* allocate the structure using spi_res */
2603        rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2604                              insert * sizeof(struct spi_transfer)
2605                              + sizeof(struct spi_replaced_transfers)
2606                              + extradatasize,
2607                              gfp);
2608        if (!rxfer)
2609                return ERR_PTR(-ENOMEM);
2610
2611        /* the release code to invoke before running the generic release */
2612        rxfer->release = release;
2613
2614        /* assign extradata */
2615        if (extradatasize)
2616                rxfer->extradata =
2617                        &rxfer->inserted_transfers[insert];
2618
2619        /* init the replaced_transfers list */
2620        INIT_LIST_HEAD(&rxfer->replaced_transfers);
2621
2622        /* assign the list_entry after which we should reinsert
2623         * the @replaced_transfers - it may be spi_message.messages!
2624         */
2625        rxfer->replaced_after = xfer_first->transfer_list.prev;
2626
2627        /* remove the requested number of transfers */
2628        for (i = 0; i < remove; i++) {
2629                /* if the entry after replaced_after is msg->transfers
2630                 * then we have been requested to remove more transfers
2631                 * than are in the list
2632                 */
2633                if (rxfer->replaced_after->next == &msg->transfers) {
2634                        dev_err(&msg->spi->dev,
2635                                "requested to remove more spi_transfers than are available\n");
2636                        /* insert replaced transfers back into the message */
2637                        list_splice(&rxfer->replaced_transfers,
2638                                    rxfer->replaced_after);
2639
2640                        /* free the spi_replace_transfer structure */
2641                        spi_res_free(rxfer);
2642
2643                        /* and return with an error */
2644                        return ERR_PTR(-EINVAL);
2645                }
2646
2647                /* remove the entry after replaced_after from list of
2648                 * transfers and add it to list of replaced_transfers
2649                 */
2650                list_move_tail(rxfer->replaced_after->next,
2651                               &rxfer->replaced_transfers);
2652        }
2653
2654        /* create copies of the given xfer with identical settings
2655         * based on the first transfer to get removed
2656         */
2657        for (i = 0; i < insert; i++) {
2658                /* we need to run in reverse order */
2659                xfer = &rxfer->inserted_transfers[insert - 1 - i];
2660
2661                /* copy all spi_transfer data */
2662                memcpy(xfer, xfer_first, sizeof(*xfer));
2663
2664                /* add to list */
2665                list_add(&xfer->transfer_list, rxfer->replaced_after);
2666
2667                /* clear cs_change and delay_usecs for all but the last */
2668                if (i) {
2669                        xfer->cs_change = false;
2670                        xfer->delay_usecs = 0;
2671                }
2672        }
2673
2674        /* set up inserted */
2675        rxfer->inserted = insert;
2676
2677        /* and register it with spi_res/spi_message */
2678        spi_res_add(msg, rxfer);
2679
2680        return rxfer;
2681}
2682EXPORT_SYMBOL_GPL(spi_replace_transfers);
2683
2684static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
2685                                        struct spi_message *msg,
2686                                        struct spi_transfer **xferp,
2687                                        size_t maxsize,
2688                                        gfp_t gfp)
2689{
2690        struct spi_transfer *xfer = *xferp, *xfers;
2691        struct spi_replaced_transfers *srt;
2692        size_t offset;
2693        size_t count, i;
2694
2695        /* warn once that we are splitting a transfer */
2696        dev_warn_once(&msg->spi->dev,
2697                      "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
2698                      xfer->len, maxsize);
2699
2700        /* calculate how many we have to replace */
2701        count = DIV_ROUND_UP(xfer->len, maxsize);
2702
2703        /* create replacement */
2704        srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2705        if (IS_ERR(srt))
2706                return PTR_ERR(srt);
2707        xfers = srt->inserted_transfers;
2708
2709        /* now handle each of those newly inserted spi_transfers.
2710         * note that the replacement spi_transfers are all preset
2711         * to the same values as *xferp, so tx_buf, rx_buf and len
2712         * are all identical (as are most other fields), so we just
2713         * have to fix up len and the pointers.
2714         *
2715         * this also includes support for the deprecated
2716         * spi_message.is_dma_mapped interface
2717         */
2718
2719        /* the first transfer just needs the length modified, so we
2720         * run it outside the loop
2721         */
2722        xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2723
2724        /* all the others need rx_buf/tx_buf also set */
2725        for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2726                /* update rx_buf, tx_buf and dma */
2727                if (xfers[i].rx_buf)
2728                        xfers[i].rx_buf += offset;
2729                if (xfers[i].rx_dma)
2730                        xfers[i].rx_dma += offset;
2731                if (xfers[i].tx_buf)
2732                        xfers[i].tx_buf += offset;
2733                if (xfers[i].tx_dma)
2734                        xfers[i].tx_dma += offset;
2735
2736                /* update length */
2737                xfers[i].len = min(maxsize, xfers[i].len - offset);
2738        }
2739
2740        /* we set xferp to the last entry we have inserted,
2741         * so that we skip the transfers we have already split
2742         */
2743        *xferp = &xfers[count - 1];
2744
2745        /* increment statistics counters */
2746        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2747                                       transfers_split_maxsize);
2748        SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2749                                       transfers_split_maxsize);
2750
2751        return 0;
2752}
2753
2754/**
2755 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2756 *                              when an individual transfer exceeds a
2757 *                              certain size
2758 * @ctlr:    the @spi_controller for this transfer
2759 * @msg:   the @spi_message to transform
2760 * @maxsize:  the maximum length a transfer may have before it is split
2761 * @gfp: GFP allocation flags
2762 *
2763 * Return: status of transformation
2764 */
2765int spi_split_transfers_maxsize(struct spi_controller *ctlr,
2766                                struct spi_message *msg,
2767                                size_t maxsize,
2768                                gfp_t gfp)
2769{
2770        struct spi_transfer *xfer;
2771        int ret;
2772
2773        /* iterate over the transfer_list,
2774         * but note that xfer is advanced to the last transfer inserted
2775         * to avoid checking sizes again unnecessarily (also, xfer may
2776         * already belong to a different list by the time the
2777         * replacement has happened)
2778         */
2779        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2780                if (xfer->len > maxsize) {
2781                        ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
2782                                                           maxsize, gfp);
2783                        if (ret)
2784                                return ret;
2785                }
2786        }
2787
2788        return 0;
2789}
2790EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
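
/*
 * Illustrative sketch: a driver whose hardware cannot move more than a
 * fixed number of bytes per transfer (hypothetical 256-byte FIFO) can
 * call this from its ->prepare_message() hook:
 */
static int __maybe_unused example_prepare_message(struct spi_controller *ctlr,
						  struct spi_message *msg)
{
	/* split any transfer longer than 256 bytes into 256-byte pieces */
	return spi_split_transfers_maxsize(ctlr, msg, 256, GFP_KERNEL);
}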
2791
2792/*-------------------------------------------------------------------------*/
2793
2794/* Core methods for SPI controller protocol drivers.  Some of the
2795 * other core methods are currently defined as inline functions.
2796 */
2797
2798static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2799                                        u8 bits_per_word)
2800{
2801        if (ctlr->bits_per_word_mask) {
2802                /* Only 32 bits fit in the mask */
2803                if (bits_per_word > 32)
2804                        return -EINVAL;
2805                if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
2806                        return -EINVAL;
2807        }
2808
2809        return 0;
2810}
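
/*
 * For example (illustrative), a controller limited to 8- and 16-bit words
 * would advertise that before registering:
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 */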
2811
2812/**
2813 * spi_setup - setup SPI mode and clock rate
2814 * @spi: the device whose settings are being modified
2815 * Context: can sleep, and no requests are queued to the device
2816 *
2817 * SPI protocol drivers may need to update the transfer mode if the
2818 * device doesn't work with its default.  They may likewise need
2819 * to update clock rates or word sizes from initial values.  This function
2820 * changes those settings, and must be called from a context that can sleep.
2821 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2822 * effect the next time the device is selected and data is transferred to
2823 * or from it.  When this function returns, the spi device is deselected.
2824 *
2825 * Note that this call will fail if the protocol driver specifies an option
2826 * that the underlying controller or its driver does not support.  For
2827 * example, not all hardware supports wire transfers using nine bit words,
2828 * LSB-first wire encoding, or active-high chipselects.
2829 *
2830 * Return: zero on success, else a negative error code.
2831 */
2832int spi_setup(struct spi_device *spi)
2833{
2834        unsigned        bad_bits, ugly_bits;
2835        int             status;
2836
2837        /* check mode to prevent that DUAL and QUAD are set at the same time
2838         */
2839        if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2840                ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2841                dev_err(&spi->dev,
2842                "setup: can not select dual and quad at the same time\n");
2843                return -EINVAL;
2844        }
2845        /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
2846         */
2847        if ((spi->mode & SPI_3WIRE) && (spi->mode &
2848                (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
2849                 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
2850                return -EINVAL;
2851        /* help drivers fail *cleanly* when they need options
2852         * that aren't supported with their current controller.
2853         * SPI_CS_WORD has a fallback software implementation,
2854         * so it is ignored here.
2855         */
2856        bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
2857        ugly_bits = bad_bits &
2858                    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
2859                     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
2860        if (ugly_bits) {
2861                dev_warn(&spi->dev,
2862                         "setup: ignoring unsupported mode bits %x\n",
2863                         ugly_bits);
2864                spi->mode &= ~ugly_bits;
2865                bad_bits &= ~ugly_bits;
2866        }
2867        if (bad_bits) {
2868                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2869                        bad_bits);
2870                return -EINVAL;
2871        }
2872
2873        if (!spi->bits_per_word)
2874                spi->bits_per_word = 8;
2875
2876        status = __spi_validate_bits_per_word(spi->controller,
2877                                              spi->bits_per_word);
2878        if (status)
2879                return status;
2880
2881        if (!spi->max_speed_hz)
2882                spi->max_speed_hz = spi->controller->max_speed_hz;
2883
2884        if (spi->controller->setup)
2885                status = spi->controller->setup(spi);
2886
2887        spi_set_cs(spi, false);
2888
2889        dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2890                        (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2891                        (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2892                        (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2893                        (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2894                        (spi->mode & SPI_LOOP) ? "loopback, " : "",
2895                        spi->bits_per_word, spi->max_speed_hz,
2896                        status);
2897
2898        return status;
2899}
2900EXPORT_SYMBOL_GPL(spi_setup);
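
/*
 * Illustrative sketch of a protocol driver adjusting device settings from
 * its probe path (all values hypothetical):
 */
static int __maybe_unused example_device_setup(struct spi_device *spi)
{
	spi->mode |= SPI_CPOL | SPI_CPHA;	/* SPI mode 3 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;		/* cap at 1 MHz */

	return spi_setup(spi);
}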
2901
2902static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2903{
2904        struct spi_controller *ctlr = spi->controller;
2905        struct spi_transfer *xfer;
2906        int w_size;
2907
2908        if (list_empty(&message->transfers))
2909                return -EINVAL;
2910
2911        /* If an SPI controller does not support toggling the CS line on each
2912         * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
2913         * for the CS line, we can emulate the CS-per-word hardware function by
2914         * splitting transfers into one-word transfers and ensuring that
2915         * cs_change is set for each transfer.
2916         */
2917        if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
2918                                          gpio_is_valid(spi->cs_gpio))) {
2919                size_t maxsize;
2920                int ret;
2921
2922                maxsize = (spi->bits_per_word + 7) / 8;
2923
2924                /* spi_split_transfers_maxsize() requires message->spi */
2925                message->spi = spi;
2926
2927                ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
2928                                                  GFP_KERNEL);
2929                if (ret)
2930                        return ret;
2931
2932                list_for_each_entry(xfer, &message->transfers, transfer_list) {
2933                        /* don't change cs_change on the last entry in the list */
2934                        if (list_is_last(&xfer->transfer_list, &message->transfers))
2935                                break;
2936                        xfer->cs_change = 1;
2937                }
2938        }
2939
2940        /* Half-duplex links include original MicroWire, and ones with
2941         * only one data pin like SPI_3WIRE (switches direction) or where
2942         * either MOSI or MISO is missing.  They can also be caused by
2943         * software limitations.
2944         */
2945        if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
2946            (spi->mode & SPI_3WIRE)) {
2947                unsigned flags = ctlr->flags;
2948
2949                list_for_each_entry(xfer, &message->transfers, transfer_list) {
2950                        if (xfer->rx_buf && xfer->tx_buf)
2951                                return -EINVAL;
2952                        if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
2953                                return -EINVAL;
2954                        if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
2955                                return -EINVAL;
2956                }
2957        }
2958
2959        /*
2960         * Set transfer bits_per_word and max speed as spi device default if
2961         * it is not set for this transfer.
2962         * Set transfer tx_nbits and rx_nbits as single transfer default
2963         * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2964         */
2965        message->frame_length = 0;
2966        list_for_each_entry(xfer, &message->transfers, transfer_list) {
2967                message->frame_length += xfer->len;
2968                if (!xfer->bits_per_word)
2969                        xfer->bits_per_word = spi->bits_per_word;
2970
2971                if (!xfer->speed_hz)
2972                        xfer->speed_hz = spi->max_speed_hz;
2973                if (!xfer->speed_hz)
2974                        xfer->speed_hz = ctlr->max_speed_hz;
2975
2976                if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
2977                        xfer->speed_hz = ctlr->max_speed_hz;
2978
2979                if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
2980                        return -EINVAL;
2981
2982                /*
2983                 * SPI transfer length should be a multiple of the SPI word size,
2984                 * where the SPI word size is a power-of-two number of bytes
2985                 */
2986                if (xfer->bits_per_word <= 8)
2987                        w_size = 1;
2988                else if (xfer->bits_per_word <= 16)
2989                        w_size = 2;
2990                else
2991                        w_size = 4;
2992
2993                /* No partial transfers accepted */
2994                if (xfer->len % w_size)
2995                        return -EINVAL;
2996
2997                if (xfer->speed_hz && ctlr->min_speed_hz &&
2998                    xfer->speed_hz < ctlr->min_speed_hz)
2999                        return -EINVAL;
3000
3001                if (xfer->tx_buf && !xfer->tx_nbits)
3002                        xfer->tx_nbits = SPI_NBITS_SINGLE;
3003                if (xfer->rx_buf && !xfer->rx_nbits)
3004                        xfer->rx_nbits = SPI_NBITS_SINGLE;
3005                /* check transfer tx/rx_nbits:
3006                 * 1. check the value matches one of single, dual and quad
3007                 * 2. check tx/rx_nbits match the mode in spi_device
3008                 */
3009                if (xfer->tx_buf) {
3010                        if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3011                                xfer->tx_nbits != SPI_NBITS_DUAL &&
3012                                xfer->tx_nbits != SPI_NBITS_QUAD)
3013                                return -EINVAL;
3014                        if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3015                                !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3016                                return -EINVAL;
3017                        if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3018                                !(spi->mode & SPI_TX_QUAD))
3019                                return -EINVAL;
3020                }
3021                /* check transfer rx_nbits */
3022                if (xfer->rx_buf) {
3023                        if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3024                                xfer->rx_nbits != SPI_NBITS_DUAL &&
3025                                xfer->rx_nbits != SPI_NBITS_QUAD)
3026                                return -EINVAL;
3027                        if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3028                                !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3029                                return -EINVAL;
3030                        if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3031                                !(spi->mode & SPI_RX_QUAD))
3032                                return -EINVAL;
3033                }
3034        }
3035
3036        message->status = -EINPROGRESS;
3037
3038        return 0;
3039}
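
/*
 * Worked example of the word-size rule __spi_validate() enforces above
 * (illustrative only): with bits_per_word = 16, w_size is 2, so a 7-byte
 * transfer is rejected with -EINVAL while this 8-byte transfer passes.
 * The tx buffer is a hypothetical caller-provided, DMA-safe buffer.
 *
 *	struct spi_transfer t = {
 *		.tx_buf = tx,
 *		.len = 8,		// must be a multiple of w_size (2)
 *		.bits_per_word = 16,
 *	};
 */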
3040
3041static int __spi_async(struct spi_device *spi, struct spi_message *message)
3042{
3043        struct spi_controller *ctlr = spi->controller;
3044
3045        /*
3046         * Some controllers do not support doing regular SPI transfers. Return
3047         * ENOTSUPP when this is the case.
3048         */
3049        if (!ctlr->transfer)
3050                return -ENOTSUPP;
3051
3052        message->spi = spi;
3053
3054        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3055        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3056
3057        trace_spi_message_submit(message);
3058
3059        return ctlr->transfer(spi, message);
3060}
3061
3062/**
3063 * spi_async - asynchronous SPI transfer
3064 * @spi: device with which data will be exchanged
3065 * @message: describes the data transfers, including completion callback
3066 * Context: any (irqs may be blocked, etc)
3067 *
3068 * This call may be used in_irq() and other contexts which can't sleep,
3069 * as well as from task contexts which can sleep.
3070 *
3071 * The completion callback is invoked in a context which can't sleep.
3072 * Before that invocation, the value of message->status is undefined.
3073 * When the callback is issued, message->status holds either zero (to
3074 * indicate complete success) or a negative error code.  After that
3075 * callback returns, the driver which issued the transfer request may
3076 * deallocate the associated memory; it's no longer in use by any SPI
3077 * core or controller driver code.
3078 *
3079 * Note that although all messages to a spi_device are handled in
3080 * FIFO order, messages may go to different devices in other orders.
3081 * Some devices might be higher priority, or have various "hard" access
3082 * time requirements, for example.
3083 *
3084 * On detection of any fault during the transfer, processing of
3085 * the entire message is aborted, and the device is deselected.
3086 * Until returning from the associated message completion callback,
3087 * no other spi_message queued to that device will be processed.
3088 * (This rule applies equally to all the synchronous transfer calls,
3089 * which are wrappers around this core asynchronous primitive.)
3090 *
3091 * Return: zero on success, else a negative error code.
3092 */
3093int spi_async(struct spi_device *spi, struct spi_message *message)
3094{
3095        struct spi_controller *ctlr = spi->controller;
3096        int ret;
3097        unsigned long flags;
3098
3099        ret = __spi_validate(spi, message);
3100        if (ret != 0)
3101                return ret;
3102
3103        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3104
3105        if (ctlr->bus_lock_flag)
3106                ret = -EBUSY;
3107        else
3108                ret = __spi_async(spi, message);
3109
3110        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3111
3112        return ret;
3113}
3114EXPORT_SYMBOL_GPL(spi_async);
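
/*
 * Usage sketch for spi_async() (illustrative only; the function names and
 * the completion wiring below are hypothetical).  The message and its
 * buffers must stay allocated until the completion callback has run.
 *
 *	static void example_complete(void *context)
 *	{
 *		complete(context);	// called from a context that can't sleep
 *	}
 *
 *	static int example_submit(struct spi_device *spi,
 *				  struct spi_message *m,
 *				  struct completion *done)
 *	{
 *		m->complete = example_complete;
 *		m->context = done;
 *		return spi_async(spi, m);	// caller later waits on *done
 *	}
 */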
3115
3116/**
3117 * spi_async_locked - version of spi_async with exclusive bus usage
3118 * @spi: device with which data will be exchanged
3119 * @message: describes the data transfers, including completion callback
3120 * Context: any (irqs may be blocked, etc)
3121 *
3122 * This call may be used in_irq() and other contexts which can't sleep,
3123 * as well as from task contexts which can sleep.
3124 *
3125 * The completion callback is invoked in a context which can't sleep.
3126 * Before that invocation, the value of message->status is undefined.
3127 * When the callback is issued, message->status holds either zero (to
3128 * indicate complete success) or a negative error code.  After that
3129 * callback returns, the driver which issued the transfer request may
3130 * deallocate the associated memory; it's no longer in use by any SPI
3131 * core or controller driver code.
3132 *
3133 * Note that although all messages to a spi_device are handled in
3134 * FIFO order, messages may go to different devices in other orders.
3135 * Some devices might be higher priority, or have various "hard" access
3136 * time requirements, for example.
3137 *
3138 * On detection of any fault during the transfer, processing of
3139 * the entire message is aborted, and the device is deselected.
3140 * Until returning from the associated message completion callback,
3141 * no other spi_message queued to that device will be processed.
3142 * (This rule applies equally to all the synchronous transfer calls,
3143 * which are wrappers around this core asynchronous primitive.)
3144 *
3145 * Return: zero on success, else a negative error code.
3146 */
3147int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3148{
3149        struct spi_controller *ctlr = spi->controller;
3150        int ret;
3151        unsigned long flags;
3152
3153        ret = __spi_validate(spi, message);
3154        if (ret != 0)
3155                return ret;
3156
3157        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3158
3159        ret = __spi_async(spi, message);
3160
3161        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3162
3163        return ret;
3165}
3166EXPORT_SYMBOL_GPL(spi_async_locked);
3167
3168/*-------------------------------------------------------------------------*/
3169
3170/* Utility methods for SPI protocol drivers, layered on
3171 * top of the core.  Some other utility methods are defined as
3172 * inline functions.
3173 */
3174
3175static void spi_complete(void *arg)
3176{
3177        complete(arg);
3178}
3179
3180static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3181{
3182        DECLARE_COMPLETION_ONSTACK(done);
3183        int status;
3184        struct spi_controller *ctlr = spi->controller;
3185        unsigned long flags;
3186
3187        status = __spi_validate(spi, message);
3188        if (status != 0)
3189                return status;
3190
3191        message->complete = spi_complete;
3192        message->context = &done;
3193        message->spi = spi;
3194
3195        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3196        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3197
3198        /* If we're not using the legacy transfer method then we will
3199         * try to transfer in the calling context, so special-case that
3200         * path.  This code would be less tricky if we could remove the
3201         * support for driver-implemented message queues.
3202         */
3203        if (ctlr->transfer == spi_queued_transfer) {
3204                spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3205
3206                trace_spi_message_submit(message);
3207
3208                status = __spi_queued_transfer(spi, message, false);
3209
3210                spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3211        } else {
3212                status = spi_async_locked(spi, message);
3213        }
3214
3215        if (status == 0) {
3216                /* Push out the messages in the calling context if we
3217                 * can.
3218                 */
3219                if (ctlr->transfer == spi_queued_transfer) {
3220                        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3221                                                       spi_sync_immediate);
3222                        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3223                                                       spi_sync_immediate);
3224                        __spi_pump_messages(ctlr, false);
3225                }
3226
3227                wait_for_completion(&done);
3228                status = message->status;
3229        }
3230        message->context = NULL;
3231        return status;
3232}
3233
3234/**
3235 * spi_sync - blocking/synchronous SPI data transfers
3236 * @spi: device with which data will be exchanged
3237 * @message: describes the data transfers
3238 * Context: can sleep
3239 *
3240 * This call may only be used from a context that may sleep.  The sleep
3241 * is non-interruptible, and has no timeout.  Low-overhead controller
3242 * drivers may DMA directly into and out of the message buffers.
3243 *
3244 * Note that the SPI device's chip select is active during the message,
3245 * and then is normally disabled between messages.  Drivers for some
3246 * frequently-used devices may want to minimize costs of selecting a chip,
3247 * by leaving it selected in anticipation that the next message will go
3248 * to the same chip.  (That may increase power usage.)
3249 *
3250 * Also, the caller is guaranteeing that the memory associated with the
3251 * message will not be freed before this call returns.
3252 *
3253 * Return: zero on success, else a negative error code.
3254 */
3255int spi_sync(struct spi_device *spi, struct spi_message *message)
3256{
3257        int ret;
3258
3259        mutex_lock(&spi->controller->bus_lock_mutex);
3260        ret = __spi_sync(spi, message);
3261        mutex_unlock(&spi->controller->bus_lock_mutex);
3262
3263        return ret;
3264}
3265EXPORT_SYMBOL_GPL(spi_sync);
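
/*
 * Usage sketch for spi_sync() (illustrative only; tx, rx and len are
 * hypothetical caller-provided values, and the buffers would normally
 * be DMA-safe, e.g. kmalloc'd):
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = len,
 *	};
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);	// sleeps until the message completes
 */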
3266
3267/**
3268 * spi_sync_locked - version of spi_sync with exclusive bus usage
3269 * @spi: device with which data will be exchanged
3270 * @message: describes the data transfers
3271 * Context: can sleep
3272 *
3273 * This call may only be used from a context that may sleep.  The sleep
3274 * is non-interruptible, and has no timeout.  Low-overhead controller
3275 * drivers may DMA directly into and out of the message buffers.
3276 *
3277 * This call should be used by drivers that require exclusive access to the
3278 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3279 * be released by a spi_bus_unlock call when the exclusive access is over.
3280 *
3281 * Return: zero on success, else a negative error code.
3282 */
3283int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3284{
3285        return __spi_sync(spi, message);
3286}
3287EXPORT_SYMBOL_GPL(spi_sync_locked);
3288
3289/**
3290 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3291 * @ctlr: SPI bus master that should be locked for exclusive bus access
3292 * Context: can sleep
3293 *
3294 * This call may only be used from a context that may sleep.  The sleep
3295 * is non-interruptible, and has no timeout.
3296 *
3297 * This call should be used by drivers that require exclusive access to the
3298 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3299 * exclusive access is over. Data transfer must be done by spi_sync_locked
3300 * and spi_async_locked calls when the SPI bus lock is held.
3301 *
3302 * Return: always zero.
3303 */
3304int spi_bus_lock(struct spi_controller *ctlr)
3305{
3306        unsigned long flags;
3307
3308        mutex_lock(&ctlr->bus_lock_mutex);
3309
3310        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3311        ctlr->bus_lock_flag = 1;
3312        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3313
3314        /* mutex remains locked until spi_bus_unlock is called */
3315
3316        return 0;
3317}
3318EXPORT_SYMBOL_GPL(spi_bus_lock);
3319
3320/**
3321 * spi_bus_unlock - release the lock for exclusive SPI bus usage
3322 * @ctlr: SPI bus master that was locked for exclusive bus access
3323 * Context: can sleep
3324 *
3325 * This call may only be used from a context that may sleep.  The sleep
3326 * is non-interruptible, and has no timeout.
3327 *
3328 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
3329 * call.
3330 *
3331 * Return: always zero.
3332 */
3333int spi_bus_unlock(struct spi_controller *ctlr)
3334{
3335        ctlr->bus_lock_flag = 0;
3336
3337        mutex_unlock(&ctlr->bus_lock_mutex);
3338
3339        return 0;
3340}
3341EXPORT_SYMBOL_GPL(spi_bus_unlock);
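
/*
 * Usage sketch for the bus-lock API (illustrative only; msg1 and msg2 are
 * hypothetical pre-built messages): a driver that must issue several
 * messages back to back, without other devices on the bus interleaving,
 * can bracket them like this.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */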
3342
3343/* portable code must never pass more than 32 bytes */
3344#define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
3345
3346static u8       *buf;
3347
3348/**
3349 * spi_write_then_read - SPI synchronous write followed by read
3350 * @spi: device with which data will be exchanged
3351 * @txbuf: data to be written (need not be dma-safe)
3352 * @n_tx: size of txbuf, in bytes
3353 * @rxbuf: buffer into which data will be read (need not be dma-safe)
3354 * @n_rx: size of rxbuf, in bytes
3355 * Context: can sleep
3356 *
3357 * This performs a half-duplex MicroWire-style transaction with the
3358 * device, sending txbuf and then reading rxbuf.  The return value
3359 * is zero for success, else a negative errno status code.
3360 * This call may only be used from a context that may sleep.
3361 *
3362 * Parameters to this routine are always copied using a small buffer;
3363 * portable code should never use this for more than 32 bytes.
3364 * Performance-sensitive or bulk transfer code should instead use
3365 * spi_{async,sync}() calls with dma-safe buffers.
3366 *
3367 * Return: zero on success, else a negative error code.
3368 */
3369int spi_write_then_read(struct spi_device *spi,
3370                const void *txbuf, unsigned n_tx,
3371                void *rxbuf, unsigned n_rx)
3372{
3373        static DEFINE_MUTEX(lock);
3374
3375        int                     status;
3376        struct spi_message      message;
3377        struct spi_transfer     x[2];
3378        u8                      *local_buf;
3379
3380        /* Use the preallocated DMA-safe buffer if we can.  We can't avoid
3381         * copying here (it's purely a convenience), but we can
3382         * keep heap costs out of the hot path unless someone else is
3383         * using the pre-allocated buffer or the transfer is too large.
3384         */
3385        if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3386                local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3387                                    GFP_KERNEL | GFP_DMA);
3388                if (!local_buf)
3389                        return -ENOMEM;
3390        } else {
3391                local_buf = buf;
3392        }
3393
3394        spi_message_init(&message);
3395        memset(x, 0, sizeof(x));
3396        if (n_tx) {
3397                x[0].len = n_tx;
3398                spi_message_add_tail(&x[0], &message);
3399        }
3400        if (n_rx) {
3401                x[1].len = n_rx;
3402                spi_message_add_tail(&x[1], &message);
3403        }
3404
3405        memcpy(local_buf, txbuf, n_tx);
3406        x[0].tx_buf = local_buf;
3407        x[1].rx_buf = local_buf + n_tx;
3408
3409        /* do the I/O */
3410        status = spi_sync(spi, &message);
3411        if (status == 0)
3412                memcpy(rxbuf, x[1].rx_buf, n_rx);
3413
3414        if (x[0].tx_buf == buf)
3415                mutex_unlock(&lock);
3416        else
3417                kfree(local_buf);
3418
3419        return status;
3420}
3421EXPORT_SYMBOL_GPL(spi_write_then_read);
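
/*
 * Usage sketch for spi_write_then_read() (illustrative only; the read
 * opcode and register layout are hypothetical): read one register of a
 * device that expects an opcode byte followed by the register address.
 * Note that cmd and val need not be DMA-safe; they are bounced through
 * the buffer handling above.
 *
 *	static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		u8 cmd[2] = { 0x0b, reg };	// hypothetical read opcode
 *
 *		return spi_write_then_read(spi, cmd, sizeof(cmd), val, 1);
 *	}
 */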
3422
3423/*-------------------------------------------------------------------------*/
3424
3425#if IS_ENABLED(CONFIG_OF)
3426static int __spi_of_device_match(struct device *dev, void *data)
3427{
3428        return dev->of_node == data;
3429}
3430
3431/* must call put_device() when done with the returned spi_device */
3432struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3433{
3434        struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3435                                                __spi_of_device_match);
3436        return dev ? to_spi_device(dev) : NULL;
3437}
3438EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
3439#endif /* IS_ENABLED(CONFIG_OF) */
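
/*
 * Usage sketch for of_find_spi_device_by_node() (illustrative only; np is
 * a hypothetical device_node pointer): the caller owns a reference on
 * success and must drop it with put_device() when done, per the comment
 * above.
 *
 *	struct spi_device *spi = of_find_spi_device_by_node(np);
 *
 *	if (spi) {
 *		// ... use spi ...
 *		put_device(&spi->dev);
 *	}
 */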
3440
3441#if IS_ENABLED(CONFIG_OF_DYNAMIC)
3442static int __spi_of_controller_match(struct device *dev, const void *data)
3443{
3444        return dev->of_node == data;
3445}
3446
3447/* SPI controllers are not on the spi_bus, so we find them another way */
3448static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3449{
3450        struct device *dev;
3451
3452        dev = class_find_device(&spi_master_class, NULL, node,
3453                                __spi_of_controller_match);
3454        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3455                dev = class_find_device(&spi_slave_class, NULL, node,
3456                                        __spi_of_controller_match);
3457        if (!dev)
3458                return NULL;
3459
3460        /* reference obtained in class_find_device() */
3461        return container_of(dev, struct spi_controller, dev);
3462}
3463
3464static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3465                         void *arg)
3466{
3467        struct of_reconfig_data *rd = arg;
3468        struct spi_controller *ctlr;
3469        struct spi_device *spi;
3470
3471        switch (of_reconfig_get_state_change(action, arg)) {
3472        case OF_RECONFIG_CHANGE_ADD:
3473                ctlr = of_find_spi_controller_by_node(rd->dn->parent);
3474                if (ctlr == NULL)
3475                        return NOTIFY_OK;       /* not for us */
3476
3477                if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3478                        put_device(&ctlr->dev);
3479                        return NOTIFY_OK;
3480                }
3481
3482                spi = of_register_spi_device(ctlr, rd->dn);
3483                put_device(&ctlr->dev);
3484
3485                if (IS_ERR(spi)) {
3486                        pr_err("%s: failed to create for '%pOF'\n",
3487                                        __func__, rd->dn);
3488                        of_node_clear_flag(rd->dn, OF_POPULATED);
3489                        return notifier_from_errno(PTR_ERR(spi));
3490                }
3491                break;
3492
3493        case OF_RECONFIG_CHANGE_REMOVE:
3494                /* already depopulated? */
3495                if (!of_node_check_flag(rd->dn, OF_POPULATED))
3496                        return NOTIFY_OK;
3497
3498                /* find our device by node */
3499                spi = of_find_spi_device_by_node(rd->dn);
3500                if (spi == NULL)
3501                        return NOTIFY_OK;       /* no? not meant for us */
3502
3503                /* unregister takes one ref away */
3504                spi_unregister_device(spi);
3505
3506                /* and put the reference of the find */
3507                put_device(&spi->dev);
3508                break;
3509        }
3510
3511        return NOTIFY_OK;
3512}
3513
3514static struct notifier_block spi_of_notifier = {
3515        .notifier_call = of_spi_notify,
3516};
3517#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3518extern struct notifier_block spi_of_notifier;
3519#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3520
3521#if IS_ENABLED(CONFIG_ACPI)
3522static int spi_acpi_controller_match(struct device *dev, const void *data)
3523{
3524        return ACPI_COMPANION(dev->parent) == data;
3525}
3526
3527static int spi_acpi_device_match(struct device *dev, void *data)
3528{
3529        return ACPI_COMPANION(dev) == data;
3530}
3531
3532static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3533{
3534        struct device *dev;
3535
3536        dev = class_find_device(&spi_master_class, NULL, adev,
3537                                spi_acpi_controller_match);
3538        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3539                dev = class_find_device(&spi_slave_class, NULL, adev,
3540                                        spi_acpi_controller_match);
3541        if (!dev)
3542                return NULL;
3543
3544        return container_of(dev, struct spi_controller, dev);
3545}
3546
3547static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3548{
3549        struct device *dev;
3550
3551        dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3552
3553        return dev ? to_spi_device(dev) : NULL;
3554}
3555
3556static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3557                           void *arg)
3558{
3559        struct acpi_device *adev = arg;
3560        struct spi_controller *ctlr;
3561        struct spi_device *spi;
3562
3563        switch (value) {
3564        case ACPI_RECONFIG_DEVICE_ADD:
3565                ctlr = acpi_spi_find_controller_by_adev(adev->parent);
3566                if (!ctlr)
3567                        break;
3568
3569                acpi_register_spi_device(ctlr, adev);
3570                put_device(&ctlr->dev);
3571                break;
3572        case ACPI_RECONFIG_DEVICE_REMOVE:
3573                if (!acpi_device_enumerated(adev))
3574                        break;
3575
3576                spi = acpi_spi_find_device_by_adev(adev);
3577                if (!spi)
3578                        break;
3579
3580                spi_unregister_device(spi);
3581                put_device(&spi->dev);
3582                break;
3583        }
3584
3585        return NOTIFY_OK;
3586}
3587
3588static struct notifier_block spi_acpi_notifier = {
3589        .notifier_call = acpi_spi_notify,
3590};
3591#else
3592extern struct notifier_block spi_acpi_notifier;
3593#endif
3594
3595static int __init spi_init(void)
3596{
3597        int     status;
3598
3599        buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3600        if (!buf) {
3601                status = -ENOMEM;
3602                goto err0;
3603        }
3604
3605        status = bus_register(&spi_bus_type);
3606        if (status < 0)
3607                goto err1;
3608
3609        status = class_register(&spi_master_class);
3610        if (status < 0)
3611                goto err2;
3612
3613        if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3614                status = class_register(&spi_slave_class);
3615                if (status < 0)
3616                        goto err3;
3617        }
3618
3619        if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3620                WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3621        if (IS_ENABLED(CONFIG_ACPI))
3622                WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3623
3624        return 0;
3625
3626err3:
3627        class_unregister(&spi_master_class);
3628err2:
3629        bus_unregister(&spi_bus_type);
3630err1:
3631        kfree(buf);
3632        buf = NULL;
3633err0:
3634        return status;
3635}
3636
3637/* board_info is normally registered in arch_initcall(),
3638 * but even essential drivers wait till later.
3639 *
3640 * REVISIT: only boardinfo really needs static linking.  The rest (device and
3641 * driver registration) _could_ be dynamically linked (modular) ... costs
3642 * include needing to have boardinfo data structures be much more public.
3643 */
3644postcore_initcall(spi_init);
3645
3646