linux/drivers/spi/spi.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2// SPI init/core code
   3//
   4// Copyright (C) 2005 David Brownell
   5// Copyright (C) 2008 Secret Lab Technologies Ltd.
   6
   7#include <linux/kernel.h>
   8#include <linux/device.h>
   9#include <linux/init.h>
  10#include <linux/cache.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/dmaengine.h>
  13#include <linux/mutex.h>
  14#include <linux/of_device.h>
  15#include <linux/of_irq.h>
  16#include <linux/clk/clk-conf.h>
  17#include <linux/slab.h>
  18#include <linux/mod_devicetable.h>
  19#include <linux/spi/spi.h>
  20#include <linux/spi/spi-mem.h>
  21#include <linux/of_gpio.h>
  22#include <linux/gpio/consumer.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/pm_domain.h>
  25#include <linux/property.h>
  26#include <linux/export.h>
  27#include <linux/sched/rt.h>
  28#include <uapi/linux/sched/types.h>
  29#include <linux/delay.h>
  30#include <linux/kthread.h>
  31#include <linux/ioport.h>
  32#include <linux/acpi.h>
  33#include <linux/highmem.h>
  34#include <linux/idr.h>
  35#include <linux/platform_data/x86/apple.h>
  36
  37#define CREATE_TRACE_POINTS
  38#include <trace/events/spi.h>
  39
  40#include "internals.h"
  41
  42static DEFINE_IDR(spi_master_idr);
  43
  44static void spidev_release(struct device *dev)
  45{
  46        struct spi_device       *spi = to_spi_device(dev);
  47
   48        /* spi controllers may clean up for released devices */
  49        if (spi->controller->cleanup)
  50                spi->controller->cleanup(spi);
  51
  52        spi_controller_put(spi->controller);
  53        kfree(spi->driver_override);
  54        kfree(spi);
  55}
  56
  57static ssize_t
  58modalias_show(struct device *dev, struct device_attribute *a, char *buf)
  59{
  60        const struct spi_device *spi = to_spi_device(dev);
  61        int len;
  62
  63        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
  64        if (len != -ENODEV)
  65                return len;
  66
  67        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
  68}
  69static DEVICE_ATTR_RO(modalias);
  70
  71static ssize_t driver_override_store(struct device *dev,
  72                                     struct device_attribute *a,
  73                                     const char *buf, size_t count)
  74{
  75        struct spi_device *spi = to_spi_device(dev);
  76        const char *end = memchr(buf, '\n', count);
  77        const size_t len = end ? end - buf : count;
  78        const char *driver_override, *old;
  79
   80        /* We need to keep extra room for a newline when displaying the value */
  81        if (len >= (PAGE_SIZE - 1))
  82                return -EINVAL;
  83
  84        driver_override = kstrndup(buf, len, GFP_KERNEL);
  85        if (!driver_override)
  86                return -ENOMEM;
  87
  88        device_lock(dev);
  89        old = spi->driver_override;
  90        if (len) {
  91                spi->driver_override = driver_override;
  92        } else {
   93                /* Empty string, disable driver override */
  94                spi->driver_override = NULL;
  95                kfree(driver_override);
  96        }
  97        device_unlock(dev);
  98        kfree(old);
  99
 100        return count;
 101}
 102
 103static ssize_t driver_override_show(struct device *dev,
 104                                    struct device_attribute *a, char *buf)
 105{
 106        const struct spi_device *spi = to_spi_device(dev);
 107        ssize_t len;
 108
 109        device_lock(dev);
 110        len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
 111        device_unlock(dev);
 112        return len;
 113}
 114static DEVICE_ATTR_RW(driver_override);
 115
 116#define SPI_STATISTICS_ATTRS(field, file)                               \
 117static ssize_t spi_controller_##field##_show(struct device *dev,        \
 118                                             struct device_attribute *attr, \
 119                                             char *buf)                 \
 120{                                                                       \
 121        struct spi_controller *ctlr = container_of(dev,                 \
 122                                         struct spi_controller, dev);   \
 123        return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
 124}                                                                       \
 125static struct device_attribute dev_attr_spi_controller_##field = {      \
 126        .attr = { .name = file, .mode = 0444 },                         \
 127        .show = spi_controller_##field##_show,                          \
 128};                                                                      \
 129static ssize_t spi_device_##field##_show(struct device *dev,            \
 130                                         struct device_attribute *attr, \
 131                                        char *buf)                      \
 132{                                                                       \
 133        struct spi_device *spi = to_spi_device(dev);                    \
 134        return spi_statistics_##field##_show(&spi->statistics, buf);    \
 135}                                                                       \
 136static struct device_attribute dev_attr_spi_device_##field = {          \
 137        .attr = { .name = file, .mode = 0444 },                         \
 138        .show = spi_device_##field##_show,                              \
 139}
 140
 141#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
 142static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
 143                                            char *buf)                  \
 144{                                                                       \
 145        unsigned long flags;                                            \
 146        ssize_t len;                                                    \
 147        spin_lock_irqsave(&stat->lock, flags);                          \
 148        len = sprintf(buf, format_string, stat->field);                 \
 149        spin_unlock_irqrestore(&stat->lock, flags);                     \
 150        return len;                                                     \
 151}                                                                       \
 152SPI_STATISTICS_ATTRS(name, file)
 153
 154#define SPI_STATISTICS_SHOW(field, format_string)                       \
 155        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
 156                                 field, format_string)
 157
 158SPI_STATISTICS_SHOW(messages, "%lu");
 159SPI_STATISTICS_SHOW(transfers, "%lu");
 160SPI_STATISTICS_SHOW(errors, "%lu");
 161SPI_STATISTICS_SHOW(timedout, "%lu");
 162
 163SPI_STATISTICS_SHOW(spi_sync, "%lu");
 164SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
 165SPI_STATISTICS_SHOW(spi_async, "%lu");
 166
 167SPI_STATISTICS_SHOW(bytes, "%llu");
 168SPI_STATISTICS_SHOW(bytes_rx, "%llu");
 169SPI_STATISTICS_SHOW(bytes_tx, "%llu");
 170
 171#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
 172        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
 173                                 "transfer_bytes_histo_" number,        \
 174                                 transfer_bytes_histo[index],  "%lu")
 175SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
 176SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
 177SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
 178SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
 179SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
 180SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
 181SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
 182SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
 183SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
 184SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
 185SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
 186SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
 187SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
 188SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
 189SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
 190SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
 191SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
 192
 193SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
 194
 195static struct attribute *spi_dev_attrs[] = {
 196        &dev_attr_modalias.attr,
 197        &dev_attr_driver_override.attr,
 198        NULL,
 199};
 200
 201static const struct attribute_group spi_dev_group = {
 202        .attrs  = spi_dev_attrs,
 203};
 204
 205static struct attribute *spi_device_statistics_attrs[] = {
 206        &dev_attr_spi_device_messages.attr,
 207        &dev_attr_spi_device_transfers.attr,
 208        &dev_attr_spi_device_errors.attr,
 209        &dev_attr_spi_device_timedout.attr,
 210        &dev_attr_spi_device_spi_sync.attr,
 211        &dev_attr_spi_device_spi_sync_immediate.attr,
 212        &dev_attr_spi_device_spi_async.attr,
 213        &dev_attr_spi_device_bytes.attr,
 214        &dev_attr_spi_device_bytes_rx.attr,
 215        &dev_attr_spi_device_bytes_tx.attr,
 216        &dev_attr_spi_device_transfer_bytes_histo0.attr,
 217        &dev_attr_spi_device_transfer_bytes_histo1.attr,
 218        &dev_attr_spi_device_transfer_bytes_histo2.attr,
 219        &dev_attr_spi_device_transfer_bytes_histo3.attr,
 220        &dev_attr_spi_device_transfer_bytes_histo4.attr,
 221        &dev_attr_spi_device_transfer_bytes_histo5.attr,
 222        &dev_attr_spi_device_transfer_bytes_histo6.attr,
 223        &dev_attr_spi_device_transfer_bytes_histo7.attr,
 224        &dev_attr_spi_device_transfer_bytes_histo8.attr,
 225        &dev_attr_spi_device_transfer_bytes_histo9.attr,
 226        &dev_attr_spi_device_transfer_bytes_histo10.attr,
 227        &dev_attr_spi_device_transfer_bytes_histo11.attr,
 228        &dev_attr_spi_device_transfer_bytes_histo12.attr,
 229        &dev_attr_spi_device_transfer_bytes_histo13.attr,
 230        &dev_attr_spi_device_transfer_bytes_histo14.attr,
 231        &dev_attr_spi_device_transfer_bytes_histo15.attr,
 232        &dev_attr_spi_device_transfer_bytes_histo16.attr,
 233        &dev_attr_spi_device_transfers_split_maxsize.attr,
 234        NULL,
 235};
 236
 237static const struct attribute_group spi_device_statistics_group = {
 238        .name  = "statistics",
 239        .attrs  = spi_device_statistics_attrs,
 240};
 241
 242static const struct attribute_group *spi_dev_groups[] = {
 243        &spi_dev_group,
 244        &spi_device_statistics_group,
 245        NULL,
 246};
 247
 248static struct attribute *spi_controller_statistics_attrs[] = {
 249        &dev_attr_spi_controller_messages.attr,
 250        &dev_attr_spi_controller_transfers.attr,
 251        &dev_attr_spi_controller_errors.attr,
 252        &dev_attr_spi_controller_timedout.attr,
 253        &dev_attr_spi_controller_spi_sync.attr,
 254        &dev_attr_spi_controller_spi_sync_immediate.attr,
 255        &dev_attr_spi_controller_spi_async.attr,
 256        &dev_attr_spi_controller_bytes.attr,
 257        &dev_attr_spi_controller_bytes_rx.attr,
 258        &dev_attr_spi_controller_bytes_tx.attr,
 259        &dev_attr_spi_controller_transfer_bytes_histo0.attr,
 260        &dev_attr_spi_controller_transfer_bytes_histo1.attr,
 261        &dev_attr_spi_controller_transfer_bytes_histo2.attr,
 262        &dev_attr_spi_controller_transfer_bytes_histo3.attr,
 263        &dev_attr_spi_controller_transfer_bytes_histo4.attr,
 264        &dev_attr_spi_controller_transfer_bytes_histo5.attr,
 265        &dev_attr_spi_controller_transfer_bytes_histo6.attr,
 266        &dev_attr_spi_controller_transfer_bytes_histo7.attr,
 267        &dev_attr_spi_controller_transfer_bytes_histo8.attr,
 268        &dev_attr_spi_controller_transfer_bytes_histo9.attr,
 269        &dev_attr_spi_controller_transfer_bytes_histo10.attr,
 270        &dev_attr_spi_controller_transfer_bytes_histo11.attr,
 271        &dev_attr_spi_controller_transfer_bytes_histo12.attr,
 272        &dev_attr_spi_controller_transfer_bytes_histo13.attr,
 273        &dev_attr_spi_controller_transfer_bytes_histo14.attr,
 274        &dev_attr_spi_controller_transfer_bytes_histo15.attr,
 275        &dev_attr_spi_controller_transfer_bytes_histo16.attr,
 276        &dev_attr_spi_controller_transfers_split_maxsize.attr,
 277        NULL,
 278};
 279
 280static const struct attribute_group spi_controller_statistics_group = {
 281        .name  = "statistics",
 282        .attrs  = spi_controller_statistics_attrs,
 283};
 284
 285static const struct attribute_group *spi_master_groups[] = {
 286        &spi_controller_statistics_group,
 287        NULL,
 288};
 289
 290void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 291                                       struct spi_transfer *xfer,
 292                                       struct spi_controller *ctlr)
 293{
 294        unsigned long flags;
 295        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
 296
 297        if (l2len < 0)
 298                l2len = 0;
 299
 300        spin_lock_irqsave(&stats->lock, flags);
 301
 302        stats->transfers++;
 303        stats->transfer_bytes_histo[l2len]++;
 304
 305        stats->bytes += xfer->len;
 306        if ((xfer->tx_buf) &&
 307            (xfer->tx_buf != ctlr->dummy_tx))
 308                stats->bytes_tx += xfer->len;
 309        if ((xfer->rx_buf) &&
 310            (xfer->rx_buf != ctlr->dummy_rx))
 311                stats->bytes_rx += xfer->len;
 312
 313        spin_unlock_irqrestore(&stats->lock, flags);
 314}
 315EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
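/*
 * Note on visibility (illustrative paths; the actual bus number and chip
 * select depend on the system): the statistics maintained above are exposed
 * through sysfs in a "statistics" directory on both the controller and each
 * device, e.g.
 *
 *	/sys/class/spi_master/spi0/statistics/bytes
 *	/sys/bus/spi/devices/spi0.0/statistics/transfer_bytes_histo_0-1
 */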
 316
 317/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 318 * and the sysfs version makes coldplug work too.
 319 */
 320
 321static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
 322                                                const struct spi_device *sdev)
 323{
 324        while (id->name[0]) {
 325                if (!strcmp(sdev->modalias, id->name))
 326                        return id;
 327                id++;
 328        }
 329        return NULL;
 330}
 331
 332const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
 333{
 334        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
 335
 336        return spi_match_id(sdrv->id_table, sdev);
 337}
 338EXPORT_SYMBOL_GPL(spi_get_device_id);
 339
 340static int spi_match_device(struct device *dev, struct device_driver *drv)
 341{
 342        const struct spi_device *spi = to_spi_device(dev);
 343        const struct spi_driver *sdrv = to_spi_driver(drv);
 344
 345        /* Check override first, and if set, only use the named driver */
 346        if (spi->driver_override)
 347                return strcmp(spi->driver_override, drv->name) == 0;
 348
 349        /* Attempt an OF style match */
 350        if (of_driver_match_device(dev, drv))
 351                return 1;
 352
 353        /* Then try ACPI */
 354        if (acpi_driver_match_device(dev, drv))
 355                return 1;
 356
 357        if (sdrv->id_table)
 358                return !!spi_match_id(sdrv->id_table, spi);
 359
 360        return strcmp(spi->modalias, drv->name) == 0;
 361}
 362
 363static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
 364{
 365        const struct spi_device         *spi = to_spi_device(dev);
 366        int rc;
 367
 368        rc = acpi_device_uevent_modalias(dev, env);
 369        if (rc != -ENODEV)
 370                return rc;
 371
 372        return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
 373}
 374
 375struct bus_type spi_bus_type = {
 376        .name           = "spi",
 377        .dev_groups     = spi_dev_groups,
 378        .match          = spi_match_device,
 379        .uevent         = spi_uevent,
 380};
 381EXPORT_SYMBOL_GPL(spi_bus_type);
 382
 383
 384static int spi_drv_probe(struct device *dev)
 385{
 386        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 387        struct spi_device               *spi = to_spi_device(dev);
 388        int ret;
 389
 390        ret = of_clk_set_defaults(dev->of_node, false);
 391        if (ret)
 392                return ret;
 393
 394        if (dev->of_node) {
 395                spi->irq = of_irq_get(dev->of_node, 0);
 396                if (spi->irq == -EPROBE_DEFER)
 397                        return -EPROBE_DEFER;
 398                if (spi->irq < 0)
 399                        spi->irq = 0;
 400        }
 401
 402        ret = dev_pm_domain_attach(dev, true);
 403        if (ret)
 404                return ret;
 405
 406        ret = sdrv->probe(spi);
 407        if (ret)
 408                dev_pm_domain_detach(dev, true);
 409
 410        return ret;
 411}
 412
 413static int spi_drv_remove(struct device *dev)
 414{
 415        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 416        int ret;
 417
 418        ret = sdrv->remove(to_spi_device(dev));
 419        dev_pm_domain_detach(dev, true);
 420
 421        return ret;
 422}
 423
 424static void spi_drv_shutdown(struct device *dev)
 425{
 426        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 427
 428        sdrv->shutdown(to_spi_device(dev));
 429}
 430
 431/**
 432 * __spi_register_driver - register a SPI driver
 433 * @owner: owner module of the driver to register
 434 * @sdrv: the driver to register
 435 * Context: can sleep
 436 *
 437 * Return: zero on success, else a negative error code.
 438 */
 439int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
 440{
 441        sdrv->driver.owner = owner;
 442        sdrv->driver.bus = &spi_bus_type;
 443        if (sdrv->probe)
 444                sdrv->driver.probe = spi_drv_probe;
 445        if (sdrv->remove)
 446                sdrv->driver.remove = spi_drv_remove;
 447        if (sdrv->shutdown)
 448                sdrv->driver.shutdown = spi_drv_shutdown;
 449        return driver_register(&sdrv->driver);
 450}
 451EXPORT_SYMBOL_GPL(__spi_register_driver);
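/*
 * Illustrative sketch (not part of this file): a minimal client driver
 * registered through this API. The "example" names are hypothetical;
 * module drivers normally use the spi_register_driver() wrapper or the
 * module_spi_driver() helper, both of which end up here.
 *
 *	static int example_probe(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver example_driver = {
 *		.driver = {
 *			.name = "example",
 *		},
 *		.probe = example_probe,
 *	};
 *	module_spi_driver(example_driver);
 */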
 452
 453/*-------------------------------------------------------------------------*/
 454
 455/* SPI devices should normally not be created by SPI device drivers; that
 456 * would make them board-specific.  Similarly with SPI controller drivers.
  457 * Device registration normally goes into a file like arch/.../mach.../board-YYY.c
 458 * with other readonly (flashable) information about mainboard devices.
 459 */
 460
 461struct boardinfo {
 462        struct list_head        list;
 463        struct spi_board_info   board_info;
 464};
 465
 466static LIST_HEAD(board_list);
 467static LIST_HEAD(spi_controller_list);
 468
 469/*
  470 * Used to protect add/del operations on the board_info list and the
  471 * spi_controller list, and their matching process; it is also
  472 * used to protect objects of type struct idr.
 473 */
 474static DEFINE_MUTEX(board_lock);
 475
 476/**
 477 * spi_alloc_device - Allocate a new SPI device
 478 * @ctlr: Controller to which device is connected
 479 * Context: can sleep
 480 *
 481 * Allows a driver to allocate and initialize a spi_device without
 482 * registering it immediately.  This allows a driver to directly
 483 * fill the spi_device with device parameters before calling
 484 * spi_add_device() on it.
 485 *
 486 * Caller is responsible to call spi_add_device() on the returned
 487 * spi_device structure to add it to the SPI controller.  If the caller
 488 * needs to discard the spi_device without adding it, then it should
 489 * call spi_dev_put() on it.
 490 *
 491 * Return: a pointer to the new device, or NULL.
 492 */
 493struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
 494{
 495        struct spi_device       *spi;
 496
 497        if (!spi_controller_get(ctlr))
 498                return NULL;
 499
 500        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
 501        if (!spi) {
 502                spi_controller_put(ctlr);
 503                return NULL;
 504        }
 505
 506        spi->master = spi->controller = ctlr;
 507        spi->dev.parent = &ctlr->dev;
 508        spi->dev.bus = &spi_bus_type;
 509        spi->dev.release = spidev_release;
 510        spi->cs_gpio = -ENOENT;
 511
 512        spin_lock_init(&spi->statistics.lock);
 513
 514        device_initialize(&spi->dev);
 515        return spi;
 516}
 517EXPORT_SYMBOL_GPL(spi_alloc_device);
 518
 519static void spi_dev_set_name(struct spi_device *spi)
 520{
 521        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
 522
 523        if (adev) {
 524                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
 525                return;
 526        }
 527
 528        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
 529                     spi->chip_select);
 530}
 531
 532static int spi_dev_check(struct device *dev, void *data)
 533{
 534        struct spi_device *spi = to_spi_device(dev);
 535        struct spi_device *new_spi = data;
 536
 537        if (spi->controller == new_spi->controller &&
 538            spi->chip_select == new_spi->chip_select)
 539                return -EBUSY;
 540        return 0;
 541}
 542
 543/**
 544 * spi_add_device - Add spi_device allocated with spi_alloc_device
 545 * @spi: spi_device to register
 546 *
 547 * Companion function to spi_alloc_device.  Devices allocated with
 548 * spi_alloc_device can be added onto the spi bus with this function.
 549 *
 550 * Return: 0 on success; negative errno on failure
 551 */
 552int spi_add_device(struct spi_device *spi)
 553{
 554        static DEFINE_MUTEX(spi_add_lock);
 555        struct spi_controller *ctlr = spi->controller;
 556        struct device *dev = ctlr->dev.parent;
 557        int status;
 558
 559        /* Chipselects are numbered 0..max; validate. */
 560        if (spi->chip_select >= ctlr->num_chipselect) {
 561                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
 562                        ctlr->num_chipselect);
 563                return -EINVAL;
 564        }
 565
 566        /* Set the bus ID string */
 567        spi_dev_set_name(spi);
 568
 569        /* We need to make sure there's no other device with this
 570         * chipselect **BEFORE** we call setup(), else we'll trash
 571         * its configuration.  Lock against concurrent add() calls.
 572         */
 573        mutex_lock(&spi_add_lock);
 574
 575        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
 576        if (status) {
 577                dev_err(dev, "chipselect %d already in use\n",
 578                                spi->chip_select);
 579                goto done;
 580        }
 581
 582        /* Descriptors take precedence */
 583        if (ctlr->cs_gpiods)
 584                spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
 585        else if (ctlr->cs_gpios)
 586                spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
 587
 588        /* Drivers may modify this initial i/o setup, but will
  589         * normally rely on the device being set up.  Devices
 590         * using SPI_CS_HIGH can't coexist well otherwise...
 591         */
 592        status = spi_setup(spi);
 593        if (status < 0) {
 594                dev_err(dev, "can't setup %s, status %d\n",
 595                                dev_name(&spi->dev), status);
 596                goto done;
 597        }
 598
 599        /* Device may be bound to an active driver when this returns */
 600        status = device_add(&spi->dev);
 601        if (status < 0)
 602                dev_err(dev, "can't add %s, status %d\n",
 603                                dev_name(&spi->dev), status);
 604        else
 605                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
 606
 607done:
 608        mutex_unlock(&spi_add_lock);
 609        return status;
 610}
 611EXPORT_SYMBOL_GPL(spi_add_device);
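/*
 * Illustrative sketch of the spi_alloc_device()/spi_add_device() pairing
 * described above (hypothetical names and values):
 *
 *	struct spi_device *spi;
 *
 *	spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "example-dev", sizeof(spi->modalias));
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 */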
 612
 613/**
 614 * spi_new_device - instantiate one new SPI device
 615 * @ctlr: Controller to which device is connected
 616 * @chip: Describes the SPI device
 617 * Context: can sleep
 618 *
  619 * On typical mainboards, this is purely internal, and it's not needed
 620 * after board init creates the hard-wired devices.  Some development
 621 * platforms may not be able to use spi_register_board_info though, and
 622 * this is exported so that for example a USB or parport based adapter
 623 * driver could add devices (which it would learn about out-of-band).
 624 *
 625 * Return: the new device, or NULL.
 626 */
 627struct spi_device *spi_new_device(struct spi_controller *ctlr,
 628                                  struct spi_board_info *chip)
 629{
 630        struct spi_device       *proxy;
 631        int                     status;
 632
 633        /* NOTE:  caller did any chip->bus_num checks necessary.
 634         *
 635         * Also, unless we change the return value convention to use
 636         * error-or-pointer (not NULL-or-pointer), troubleshootability
 637         * suggests syslogged diagnostics are best here (ugh).
 638         */
 639
 640        proxy = spi_alloc_device(ctlr);
 641        if (!proxy)
 642                return NULL;
 643
 644        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
 645
 646        proxy->chip_select = chip->chip_select;
 647        proxy->max_speed_hz = chip->max_speed_hz;
 648        proxy->mode = chip->mode;
 649        proxy->irq = chip->irq;
 650        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
 651        proxy->dev.platform_data = (void *) chip->platform_data;
 652        proxy->controller_data = chip->controller_data;
 653        proxy->controller_state = NULL;
 654
 655        if (chip->properties) {
 656                status = device_add_properties(&proxy->dev, chip->properties);
 657                if (status) {
 658                        dev_err(&ctlr->dev,
 659                                "failed to add properties to '%s': %d\n",
 660                                chip->modalias, status);
 661                        goto err_dev_put;
 662                }
 663        }
 664
 665        status = spi_add_device(proxy);
 666        if (status < 0)
 667                goto err_remove_props;
 668
 669        return proxy;
 670
 671err_remove_props:
 672        if (chip->properties)
 673                device_remove_properties(&proxy->dev);
 674err_dev_put:
 675        spi_dev_put(proxy);
 676        return NULL;
 677}
 678EXPORT_SYMBOL_GPL(spi_new_device);
 679
 680/**
 681 * spi_unregister_device - unregister a single SPI device
 682 * @spi: spi_device to unregister
 683 *
 684 * Start making the passed SPI device vanish. Normally this would be handled
 685 * by spi_unregister_controller().
 686 */
 687void spi_unregister_device(struct spi_device *spi)
 688{
 689        if (!spi)
 690                return;
 691
 692        if (spi->dev.of_node) {
 693                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
 694                of_node_put(spi->dev.of_node);
 695        }
 696        if (ACPI_COMPANION(&spi->dev))
 697                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
 698        device_unregister(&spi->dev);
 699}
 700EXPORT_SYMBOL_GPL(spi_unregister_device);
 701
 702static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
 703                                              struct spi_board_info *bi)
 704{
 705        struct spi_device *dev;
 706
 707        if (ctlr->bus_num != bi->bus_num)
 708                return;
 709
 710        dev = spi_new_device(ctlr, bi);
 711        if (!dev)
 712                dev_err(ctlr->dev.parent, "can't create new device for %s\n",
 713                        bi->modalias);
 714}
 715
 716/**
 717 * spi_register_board_info - register SPI devices for a given board
 718 * @info: array of chip descriptors
 719 * @n: how many descriptors are provided
 720 * Context: can sleep
 721 *
 722 * Board-specific early init code calls this (probably during arch_initcall)
 723 * with segments of the SPI device table.  Any device nodes are created later,
 724 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 725 * this table of devices forever, so that reloading a controller driver will
 726 * not make Linux forget about these hard-wired devices.
 727 *
 728 * Other code can also call this, e.g. a particular add-on board might provide
 729 * SPI devices through its expansion connector, so code initializing that board
 730 * would naturally declare its SPI devices.
 731 *
 732 * The board info passed can safely be __initdata ... but be careful of
 733 * any embedded pointers (platform_data, etc), they're copied as-is.
 734 * Device properties are deep-copied though.
 735 *
 736 * Return: zero on success, else a negative error code.
 737 */
 738int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 739{
 740        struct boardinfo *bi;
 741        int i;
 742
 743        if (!n)
 744                return 0;
 745
 746        bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
 747        if (!bi)
 748                return -ENOMEM;
 749
 750        for (i = 0; i < n; i++, bi++, info++) {
 751                struct spi_controller *ctlr;
 752
 753                memcpy(&bi->board_info, info, sizeof(*info));
 754                if (info->properties) {
 755                        bi->board_info.properties =
 756                                        property_entries_dup(info->properties);
 757                        if (IS_ERR(bi->board_info.properties))
 758                                return PTR_ERR(bi->board_info.properties);
 759                }
 760
 761                mutex_lock(&board_lock);
 762                list_add_tail(&bi->list, &board_list);
 763                list_for_each_entry(ctlr, &spi_controller_list, list)
 764                        spi_match_controller_to_boardinfo(ctlr,
 765                                                          &bi->board_info);
 766                mutex_unlock(&board_lock);
 767        }
 768
 769        return 0;
 770}
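/*
 * Illustrative sketch (hypothetical board file): declaring one hard-wired
 * device during early init, as described above.
 *
 *	static struct spi_board_info example_board_info[] __initdata = {
 *		{
 *			.modalias	= "example-flash",
 *			.max_speed_hz	= 25000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(example_board_info,
 *				ARRAY_SIZE(example_board_info));
 */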
 771
 772/*-------------------------------------------------------------------------*/
 773
 774static void spi_set_cs(struct spi_device *spi, bool enable)
 775{
 776        if (spi->mode & SPI_CS_HIGH)
 777                enable = !enable;
 778
 779        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
 780                /*
 781                 * Honour the SPI_NO_CS flag and invert the enable line, as
 782                 * active low is default for SPI. Execution paths that handle
 783                 * polarity inversion in gpiolib (such as device tree) will
  784                  * enforce active high using SPI_CS_HIGH, resulting in a
 785                 * double inversion through the code above.
 786                 */
 787                if (!(spi->mode & SPI_NO_CS)) {
 788                        if (spi->cs_gpiod)
 789                                gpiod_set_value_cansleep(spi->cs_gpiod,
 790                                                         !enable);
 791                        else
 792                                gpio_set_value_cansleep(spi->cs_gpio, !enable);
 793                }
 794                /* Some SPI masters need both GPIO CS & slave_select */
 795                if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
 796                    spi->controller->set_cs)
 797                        spi->controller->set_cs(spi, !enable);
 798        } else if (spi->controller->set_cs) {
 799                spi->controller->set_cs(spi, !enable);
 800        }
 801}
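/*
 * Worked example of the logic above: for a default active-low device
 * (neither SPI_CS_HIGH nor SPI_NO_CS set) using a raw cs_gpio,
 * spi_set_cs(spi, true) leaves "enable" true and drives the GPIO to
 * !enable == 0, asserting the chip select; spi_set_cs(spi, false) drives
 * it back to 1. With SPI_CS_HIGH set, the initial inversion flips
 * "enable", so the same calls drive the line to 1 and 0 respectively.
 */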
 802
 803#ifdef CONFIG_HAS_DMA
 804int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
 805                struct sg_table *sgt, void *buf, size_t len,
 806                enum dma_data_direction dir)
 807{
 808        const bool vmalloced_buf = is_vmalloc_addr(buf);
 809        unsigned int max_seg_size = dma_get_max_seg_size(dev);
 810#ifdef CONFIG_HIGHMEM
 811        const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
 812                                (unsigned long)buf < (PKMAP_BASE +
 813                                        (LAST_PKMAP * PAGE_SIZE)));
 814#else
 815        const bool kmap_buf = false;
 816#endif
 817        int desc_len;
 818        int sgs;
 819        struct page *vm_page;
 820        struct scatterlist *sg;
 821        void *sg_buf;
 822        size_t min;
 823        int i, ret;
 824
 825        if (vmalloced_buf || kmap_buf) {
 826                desc_len = min_t(int, max_seg_size, PAGE_SIZE);
 827                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
 828        } else if (virt_addr_valid(buf)) {
 829                desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
 830                sgs = DIV_ROUND_UP(len, desc_len);
 831        } else {
 832                return -EINVAL;
 833        }
 834
 835        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
 836        if (ret != 0)
 837                return ret;
 838
 839        sg = &sgt->sgl[0];
 840        for (i = 0; i < sgs; i++) {
 841
 842                if (vmalloced_buf || kmap_buf) {
 843                        /*
 844                         * Next scatterlist entry size is the minimum between
 845                         * the desc_len and the remaining buffer length that
 846                         * fits in a page.
 847                         */
 848                        min = min_t(size_t, desc_len,
 849                                    min_t(size_t, len,
 850                                          PAGE_SIZE - offset_in_page(buf)));
 851                        if (vmalloced_buf)
 852                                vm_page = vmalloc_to_page(buf);
 853                        else
 854                                vm_page = kmap_to_page(buf);
 855                        if (!vm_page) {
 856                                sg_free_table(sgt);
 857                                return -ENOMEM;
 858                        }
 859                        sg_set_page(sg, vm_page,
 860                                    min, offset_in_page(buf));
 861                } else {
 862                        min = min_t(size_t, len, desc_len);
 863                        sg_buf = buf;
 864                        sg_set_buf(sg, sg_buf, min);
 865                }
 866
 867                buf += min;
 868                len -= min;
 869                sg = sg_next(sg);
 870        }
 871
 872        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
 873        if (!ret)
 874                ret = -ENOMEM;
 875        if (ret < 0) {
 876                sg_free_table(sgt);
 877                return ret;
 878        }
 879
 880        sgt->nents = ret;
 881
 882        return 0;
 883}
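/*
 * Worked example of the segment math above (illustrative numbers, 4 KiB
 * pages, assuming dma_get_max_seg_size() >= PAGE_SIZE): a vmalloc'ed
 * buffer with len == 10240 starting 512 bytes into a page gives
 * desc_len = PAGE_SIZE and sgs = DIV_ROUND_UP(10240 + 512, 4096) = 3;
 * the first entry maps 4096 - 512 = 3584 bytes, the second 4096, and
 * the third the remaining 2560.
 */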
 884
 885void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
 886                   struct sg_table *sgt, enum dma_data_direction dir)
 887{
 888        if (sgt->orig_nents) {
 889                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
 890                sg_free_table(sgt);
 891        }
 892}
 893
 894static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 895{
 896        struct device *tx_dev, *rx_dev;
 897        struct spi_transfer *xfer;
 898        int ret;
 899
 900        if (!ctlr->can_dma)
 901                return 0;
 902
 903        if (ctlr->dma_tx)
 904                tx_dev = ctlr->dma_tx->device->dev;
 905        else
 906                tx_dev = ctlr->dev.parent;
 907
 908        if (ctlr->dma_rx)
 909                rx_dev = ctlr->dma_rx->device->dev;
 910        else
 911                rx_dev = ctlr->dev.parent;
 912
 913        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 914                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 915                        continue;
 916
 917                if (xfer->tx_buf != NULL) {
 918                        ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
 919                                          (void *)xfer->tx_buf, xfer->len,
 920                                          DMA_TO_DEVICE);
 921                        if (ret != 0)
 922                                return ret;
 923                }
 924
 925                if (xfer->rx_buf != NULL) {
 926                        ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
 927                                          xfer->rx_buf, xfer->len,
 928                                          DMA_FROM_DEVICE);
 929                        if (ret != 0) {
 930                                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
 931                                              DMA_TO_DEVICE);
 932                                return ret;
 933                        }
 934                }
 935        }
 936
 937        ctlr->cur_msg_mapped = true;
 938
 939        return 0;
 940}
 941
 942static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 943{
 944        struct spi_transfer *xfer;
 945        struct device *tx_dev, *rx_dev;
 946
 947        if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
 948                return 0;
 949
 950        if (ctlr->dma_tx)
 951                tx_dev = ctlr->dma_tx->device->dev;
 952        else
 953                tx_dev = ctlr->dev.parent;
 954
 955        if (ctlr->dma_rx)
 956                rx_dev = ctlr->dma_rx->device->dev;
 957        else
 958                rx_dev = ctlr->dev.parent;
 959
 960        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 961                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 962                        continue;
 963
 964                spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 965                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 966        }
 967
 968        return 0;
 969}
 970#else /* !CONFIG_HAS_DMA */
 971static inline int __spi_map_msg(struct spi_controller *ctlr,
 972                                struct spi_message *msg)
 973{
 974        return 0;
 975}
 976
 977static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 978                                  struct spi_message *msg)
 979{
 980        return 0;
 981}
 982#endif /* !CONFIG_HAS_DMA */
 983
 984static inline int spi_unmap_msg(struct spi_controller *ctlr,
 985                                struct spi_message *msg)
 986{
 987        struct spi_transfer *xfer;
 988
 989        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 990                /*
  991                 * Restore tx_buf and rx_buf to NULL if they were
  992                 * replaced with the controller's dummy buffers.
 993                 */
 994                if (xfer->tx_buf == ctlr->dummy_tx)
 995                        xfer->tx_buf = NULL;
 996                if (xfer->rx_buf == ctlr->dummy_rx)
 997                        xfer->rx_buf = NULL;
 998        }
 999
1000        return __spi_unmap_msg(ctlr, msg);
1001}
1002
1003static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1004{
1005        struct spi_transfer *xfer;
1006        void *tmp;
1007        unsigned int max_tx, max_rx;
1008
1009        if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
1010                max_tx = 0;
1011                max_rx = 0;
1012
1013                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1014                        if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1015                            !xfer->tx_buf)
1016                                max_tx = max(xfer->len, max_tx);
1017                        if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1018                            !xfer->rx_buf)
1019                                max_rx = max(xfer->len, max_rx);
1020                }
1021
1022                if (max_tx) {
1023                        tmp = krealloc(ctlr->dummy_tx, max_tx,
1024                                       GFP_KERNEL | GFP_DMA);
1025                        if (!tmp)
1026                                return -ENOMEM;
1027                        ctlr->dummy_tx = tmp;
1028                        memset(tmp, 0, max_tx);
1029                }
1030
1031                if (max_rx) {
1032                        tmp = krealloc(ctlr->dummy_rx, max_rx,
1033                                       GFP_KERNEL | GFP_DMA);
1034                        if (!tmp)
1035                                return -ENOMEM;
1036                        ctlr->dummy_rx = tmp;
1037                }
1038
1039                if (max_tx || max_rx) {
1040                        list_for_each_entry(xfer, &msg->transfers,
1041                                            transfer_list) {
1042                                if (!xfer->tx_buf)
1043                                        xfer->tx_buf = ctlr->dummy_tx;
1044                                if (!xfer->rx_buf)
1045                                        xfer->rx_buf = ctlr->dummy_rx;
1046                        }
1047                }
1048        }
1049
1050        return __spi_map_msg(ctlr, msg);
1051}
1052
1053static int spi_transfer_wait(struct spi_controller *ctlr,
1054                             struct spi_message *msg,
1055                             struct spi_transfer *xfer)
1056{
1057        struct spi_statistics *statm = &ctlr->statistics;
1058        struct spi_statistics *stats = &msg->spi->statistics;
1059        unsigned long long ms = 1;
1060
1061        if (spi_controller_is_slave(ctlr)) {
1062                if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1063                        dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1064                        return -EINTR;
1065                }
1066        } else {
1067                ms = 8LL * 1000LL * xfer->len;
1068                do_div(ms, xfer->speed_hz);
1069                ms += ms + 200; /* some tolerance */
1070
1071                if (ms > UINT_MAX)
1072                        ms = UINT_MAX;
1073
1074                ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1075                                                 msecs_to_jiffies(ms));
1076
1077                if (ms == 0) {
1078                        SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1079                        SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1080                        dev_err(&msg->spi->dev,
1081                                "SPI transfer timed out\n");
1082                        return -ETIMEDOUT;
1083                }
1084        }
1085
1086        return 0;
1087}
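/*
 * Worked example of the timeout above (illustrative numbers): a 100 byte
 * transfer at speed_hz == 1000000 gives ms = 8 * 1000 * 100 / 1000000 = 0,
 * and ms += ms + 200 then yields a 200 ms wait dominated by the fixed
 * tolerance; a 1 MiB transfer at the same rate gives 8388 ms, doubled
 * plus 200 to 16976 ms.
 */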
1088
1089/*
1090 * spi_transfer_one_message - Default implementation of transfer_one_message()
1091 *
1092 * This is a standard implementation of transfer_one_message() for
1093 * drivers which implement a transfer_one() operation.  It provides
1094 * standard handling of delays and chip select management.
1095 */
1096static int spi_transfer_one_message(struct spi_controller *ctlr,
1097                                    struct spi_message *msg)
1098{
1099        struct spi_transfer *xfer;
1100        bool keep_cs = false;
1101        int ret = 0;
1102        struct spi_statistics *statm = &ctlr->statistics;
1103        struct spi_statistics *stats = &msg->spi->statistics;
1104
1105        spi_set_cs(msg->spi, true);
1106
1107        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1108        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1109
1110        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1111                trace_spi_transfer_start(msg, xfer);
1112
1113                spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1114                spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1115
1116                if (xfer->tx_buf || xfer->rx_buf) {
1117                        reinit_completion(&ctlr->xfer_completion);
1118
1119                        ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1120                        if (ret < 0) {
1121                                SPI_STATISTICS_INCREMENT_FIELD(statm,
1122                                                               errors);
1123                                SPI_STATISTICS_INCREMENT_FIELD(stats,
1124                                                               errors);
1125                                dev_err(&msg->spi->dev,
1126                                        "SPI transfer failed: %d\n", ret);
1127                                goto out;
1128                        }
1129
1130                        if (ret > 0) {
1131                                ret = spi_transfer_wait(ctlr, msg, xfer);
1132                                if (ret < 0)
1133                                        msg->status = ret;
1134                        }
1135                } else {
1136                        if (xfer->len)
1137                                dev_err(&msg->spi->dev,
1138                                        "Bufferless transfer has length %u\n",
1139                                        xfer->len);
1140                }
1141
1142                trace_spi_transfer_stop(msg, xfer);
1143
1144                if (msg->status != -EINPROGRESS)
1145                        goto out;
1146
1147                if (xfer->delay_usecs) {
1148                        u16 us = xfer->delay_usecs;
1149
1150                        if (us <= 10)
1151                                udelay(us);
1152                        else
1153                                usleep_range(us, us + DIV_ROUND_UP(us, 10));
1154                }
1155
1156                if (xfer->cs_change) {
1157                        if (list_is_last(&xfer->transfer_list,
1158                                         &msg->transfers)) {
1159                                keep_cs = true;
1160                        } else {
1161                                spi_set_cs(msg->spi, false);
1162                                udelay(10);
1163                                spi_set_cs(msg->spi, true);
1164                        }
1165                }
1166
1167                msg->actual_length += xfer->len;
1168        }
1169
1170out:
1171        if (ret != 0 || !keep_cs)
1172                spi_set_cs(msg->spi, false);
1173
1174        if (msg->status == -EINPROGRESS)
1175                msg->status = ret;
1176
1177        if (msg->status && ctlr->handle_err)
1178                ctlr->handle_err(ctlr, msg);
1179
1180        spi_res_release(ctlr, msg);
1181
1182        spi_finalize_current_message(ctlr);
1183
1184        return ret;
1185}
1186
1187/**
1188 * spi_finalize_current_transfer - report completion of a transfer
1189 * @ctlr: the controller reporting completion
1190 *
1191 * Called by SPI drivers using the core transfer_one_message()
1192 * implementation to notify it that the current interrupt driven
1193 * transfer has finished and the next one may be scheduled.
1194 */
1195void spi_finalize_current_transfer(struct spi_controller *ctlr)
1196{
1197        complete(&ctlr->xfer_completion);
1198}
1199EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
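/*
 * Illustrative sketch (hypothetical driver code): a transfer_one()
 * implementation that starts the hardware and returns 1 defers completion
 * to its interrupt handler, which then reports it with the call above:
 *
 *	static irqreturn_t example_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		...acknowledge and drain the hardware...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */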
1200
1201/**
1202 * __spi_pump_messages - function which processes spi message queue
1203 * @ctlr: controller to process queue for
1204 * @in_kthread: true if we are in the context of the message pump thread
1205 *
1206 * This function checks if there is any spi message in the queue that
 1207 * needs processing and, if so, calls out to the driver to initialize hardware
1208 * and transfer each message.
1209 *
1210 * Note that it is called both from the kthread itself and also from
1211 * inside spi_sync(); the queue extraction handling at the top of the
1212 * function should deal with this safely.
1213 */
1214static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1215{
1216        unsigned long flags;
1217        bool was_busy = false;
1218        int ret;
1219
1220        /* Lock queue */
1221        spin_lock_irqsave(&ctlr->queue_lock, flags);
1222
1223        /* Make sure we are not already running a message */
1224        if (ctlr->cur_msg) {
1225                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1226                return;
1227        }
1228
1229        /* If another context is idling the device then defer */
1230        if (ctlr->idling) {
1231                kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1232                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1233                return;
1234        }
1235
1236        /* Check if the queue is idle */
1237        if (list_empty(&ctlr->queue) || !ctlr->running) {
1238                if (!ctlr->busy) {
1239                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1240                        return;
1241                }
1242
1243                /* Only do teardown in the thread */
1244                if (!in_kthread) {
1245                        kthread_queue_work(&ctlr->kworker,
1246                                           &ctlr->pump_messages);
1247                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1248                        return;
1249                }
1250
1251                ctlr->busy = false;
1252                ctlr->idling = true;
1253                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1254
1255                kfree(ctlr->dummy_rx);
1256                ctlr->dummy_rx = NULL;
1257                kfree(ctlr->dummy_tx);
1258                ctlr->dummy_tx = NULL;
1259                if (ctlr->unprepare_transfer_hardware &&
1260                    ctlr->unprepare_transfer_hardware(ctlr))
1261                        dev_err(&ctlr->dev,
1262                                "failed to unprepare transfer hardware\n");
1263                if (ctlr->auto_runtime_pm) {
1264                        pm_runtime_mark_last_busy(ctlr->dev.parent);
1265                        pm_runtime_put_autosuspend(ctlr->dev.parent);
1266                }
1267                trace_spi_controller_idle(ctlr);
1268
1269                spin_lock_irqsave(&ctlr->queue_lock, flags);
1270                ctlr->idling = false;
1271                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1272                return;
1273        }
1274
1275        /* Extract head of queue */
1276        ctlr->cur_msg =
1277                list_first_entry(&ctlr->queue, struct spi_message, queue);
1278
1279        list_del_init(&ctlr->cur_msg->queue);
1280        if (ctlr->busy)
1281                was_busy = true;
1282        else
1283                ctlr->busy = true;
1284        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1285
1286        mutex_lock(&ctlr->io_mutex);
1287
1288        if (!was_busy && ctlr->auto_runtime_pm) {
1289                ret = pm_runtime_get_sync(ctlr->dev.parent);
1290                if (ret < 0) {
1291                        pm_runtime_put_noidle(ctlr->dev.parent);
1292                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
1293                                ret);
1294                        mutex_unlock(&ctlr->io_mutex);
1295                        return;
1296                }
1297        }
1298
1299        if (!was_busy)
1300                trace_spi_controller_busy(ctlr);
1301
1302        if (!was_busy && ctlr->prepare_transfer_hardware) {
1303                ret = ctlr->prepare_transfer_hardware(ctlr);
1304                if (ret) {
1305                        dev_err(&ctlr->dev,
1306                                "failed to prepare transfer hardware\n");
1307
1308                        if (ctlr->auto_runtime_pm)
1309                                pm_runtime_put(ctlr->dev.parent);
1310                        mutex_unlock(&ctlr->io_mutex);
1311                        return;
1312                }
1313        }
1314
1315        trace_spi_message_start(ctlr->cur_msg);
1316
1317        if (ctlr->prepare_message) {
1318                ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
1319                if (ret) {
1320                        dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1321                                ret);
1322                        ctlr->cur_msg->status = ret;
1323                        spi_finalize_current_message(ctlr);
1324                        goto out;
1325                }
1326                ctlr->cur_msg_prepared = true;
1327        }
1328
1329        ret = spi_map_msg(ctlr, ctlr->cur_msg);
1330        if (ret) {
1331                ctlr->cur_msg->status = ret;
1332                spi_finalize_current_message(ctlr);
1333                goto out;
1334        }
1335
1336        ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
1337        if (ret) {
1338                dev_err(&ctlr->dev,
1339                        "failed to transfer one message from queue\n");
1340                goto out;
1341        }
1342
1343out:
1344        mutex_unlock(&ctlr->io_mutex);
1345
1346        /* Prod the scheduler in case transfer_one() was busy waiting */
1347        if (!ret)
1348                cond_resched();
1349}
1350
1351/**
1352 * spi_pump_messages - kthread work function which processes spi message queue
1353 * @work: pointer to kthread work struct contained in the controller struct
1354 */
1355static void spi_pump_messages(struct kthread_work *work)
1356{
1357        struct spi_controller *ctlr =
1358                container_of(work, struct spi_controller, pump_messages);
1359
1360        __spi_pump_messages(ctlr, true);
1361}
1362
1363static int spi_init_queue(struct spi_controller *ctlr)
1364{
1365        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1366
1367        ctlr->running = false;
1368        ctlr->busy = false;
1369
1370        kthread_init_worker(&ctlr->kworker);
1371        ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
1372                                         "%s", dev_name(&ctlr->dev));
1373        if (IS_ERR(ctlr->kworker_task)) {
1374                dev_err(&ctlr->dev, "failed to create message pump task\n");
1375                return PTR_ERR(ctlr->kworker_task);
1376        }
1377        kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1378
1379        /*
1380         * Controller config will indicate if this controller should run the
1381         * message pump with high (realtime) priority to reduce the transfer
1382         * latency on the bus by minimising the delay between a transfer
1383         * request and the scheduling of the message pump thread. Without this
1384         * setting the message pump thread will remain at default priority.
1385         */
1386        if (ctlr->rt) {
1387                dev_info(&ctlr->dev,
1388                        "will run message pump with realtime priority\n");
1389                sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
1390        }
1391
1392        return 0;
1393}
1394
1395/**
1396 * spi_get_next_queued_message() - called by driver to check for queued
1397 * messages
1398 * @ctlr: the controller to check for queued messages
1399 *
1400 * If there are more messages in the queue, the next message is returned from
1401 * this call.
1402 *
1403 * Return: the next message in the queue, else NULL if the queue is empty.
1404 */
1405struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1406{
1407        struct spi_message *next;
1408        unsigned long flags;
1409
1410        /* get a pointer to the next message, if any */
1411        spin_lock_irqsave(&ctlr->queue_lock, flags);
1412        next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1413                                        queue);
1414        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1415
1416        return next;
1417}
1418EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1419
1420/**
1421 * spi_finalize_current_message() - the current message is complete
1422 * @ctlr: the controller to return the message to
1423 *
1424 * Called by the driver to notify the core that the message in the front of the
1425 * queue is complete and can be removed from the queue.
1426 */
1427void spi_finalize_current_message(struct spi_controller *ctlr)
1428{
1429        struct spi_message *mesg;
1430        unsigned long flags;
1431        int ret;
1432
1433        spin_lock_irqsave(&ctlr->queue_lock, flags);
1434        mesg = ctlr->cur_msg;
1435        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1436
1437        spi_unmap_msg(ctlr, mesg);
1438
1439        if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1440                ret = ctlr->unprepare_message(ctlr, mesg);
1441                if (ret) {
1442                        dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1443                                ret);
1444                }
1445        }
1446
1447        spin_lock_irqsave(&ctlr->queue_lock, flags);
1448        ctlr->cur_msg = NULL;
1449        ctlr->cur_msg_prepared = false;
1450        kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1451        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1452
1453        trace_spi_message_done(mesg);
1454
1455        mesg->state = NULL;
1456        if (mesg->complete)
1457                mesg->complete(mesg->context);
1458}
1459EXPORT_SYMBOL_GPL(spi_finalize_current_message);
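
/*
 * Illustrative sketch (not part of this file): a driver typically
 * calls spi_finalize_current_message() from the interrupt or
 * completion path of the transfer it started in
 * ->transfer_one_message(); "foo_spi" and its members are
 * hypothetical:
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct foo_spi *foo = dev_id;
 *
 *		foo->cur_msg->status = 0;
 *		spi_finalize_current_message(foo->ctlr);
 *		return IRQ_HANDLED;
 *	}
 */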
1460
1461static int spi_start_queue(struct spi_controller *ctlr)
1462{
1463        unsigned long flags;
1464
1465        spin_lock_irqsave(&ctlr->queue_lock, flags);
1466
1467        if (ctlr->running || ctlr->busy) {
1468                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1469                return -EBUSY;
1470        }
1471
1472        ctlr->running = true;
1473        ctlr->cur_msg = NULL;
1474        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1475
1476        kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1477
1478        return 0;
1479}
1480
1481static int spi_stop_queue(struct spi_controller *ctlr)
1482{
1483        unsigned long flags;
1484        unsigned limit = 500;
1485        int ret = 0;
1486
1487        spin_lock_irqsave(&ctlr->queue_lock, flags);
1488
1489        /*
1490         * This is a bit lame, but is optimized for the common execution path.
1491         * A wait_queue on the ctlr->busy could be used, but then the common
1492         * execution path (pump_messages) would be required to call wake_up or
1493         * friends on every SPI message. Do this instead.
1494         */
1495        while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1496                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1497                usleep_range(10000, 11000);
1498                spin_lock_irqsave(&ctlr->queue_lock, flags);
1499        }
1500
1501        if (!list_empty(&ctlr->queue) || ctlr->busy)
1502                ret = -EBUSY;
1503        else
1504                ctlr->running = false;
1505
1506        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1507
1508        if (ret)
1509                dev_warn(&ctlr->dev, "could not stop message queue\n");
1510
1511        return ret;
1513}
1514
1515static int spi_destroy_queue(struct spi_controller *ctlr)
1516{
1517        int ret;
1518
1519        ret = spi_stop_queue(ctlr);
1520
1521        /*
1522         * kthread_flush_worker will block until all work is done.
1523         * If the reason that stop_queue timed out is that the work will never
1524         * finish, then it does no good to call flush/stop thread, so
1525         * return anyway.
1526         */
1527        if (ret) {
1528                dev_err(&ctlr->dev, "problem destroying queue\n");
1529                return ret;
1530        }
1531
1532        kthread_flush_worker(&ctlr->kworker);
1533        kthread_stop(ctlr->kworker_task);
1534
1535        return 0;
1536}
1537
1538static int __spi_queued_transfer(struct spi_device *spi,
1539                                 struct spi_message *msg,
1540                                 bool need_pump)
1541{
1542        struct spi_controller *ctlr = spi->controller;
1543        unsigned long flags;
1544
1545        spin_lock_irqsave(&ctlr->queue_lock, flags);
1546
1547        if (!ctlr->running) {
1548                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1549                return -ESHUTDOWN;
1550        }
1551        msg->actual_length = 0;
1552        msg->status = -EINPROGRESS;
1553
1554        list_add_tail(&msg->queue, &ctlr->queue);
1555        if (!ctlr->busy && need_pump)
1556                kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1557
1558        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1559        return 0;
1560}
1561
1562/**
1563 * spi_queued_transfer - transfer function for queued transfers
1564 * @spi: spi device which is requesting transfer
1565 * @msg: spi message to be queued to the driver queue for handling
1566 *
1567 * Return: zero on success, else a negative error code.
1568 */
1569static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1570{
1571        return __spi_queued_transfer(spi, msg, true);
1572}
1573
1574static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1575{
1576        int ret;
1577
1578        ctlr->transfer = spi_queued_transfer;
1579        if (!ctlr->transfer_one_message)
1580                ctlr->transfer_one_message = spi_transfer_one_message;
1581
1582        /* Initialize and start queue */
1583        ret = spi_init_queue(ctlr);
1584        if (ret) {
1585                dev_err(&ctlr->dev, "problem initializing queue\n");
1586                goto err_init_queue;
1587        }
1588        ctlr->queued = true;
1589        ret = spi_start_queue(ctlr);
1590        if (ret) {
1591                dev_err(&ctlr->dev, "problem starting queue\n");
1592                goto err_start_queue;
1593        }
1594
1595        return 0;
1596
1597err_start_queue:
1598        spi_destroy_queue(ctlr);
1599err_init_queue:
1600        return ret;
1601}
1602
1603/**
1604 * spi_flush_queue - Send all pending messages in the queue from the caller's
1605 *                   context
1606 * @ctlr: controller to process queue for
1607 *
1608 * This should be used when one wants to ensure all pending messages have been
1609 * sent before doing something. It is used by the spi-mem code to make sure SPI
1610 * memory operations do not preempt regular SPI transfers that have been queued
1611 * before the spi-mem operation.
1612 */
1613void spi_flush_queue(struct spi_controller *ctlr)
1614{
1615        if (ctlr->transfer == spi_queued_transfer)
1616                __spi_pump_messages(ctlr, false);
1617}
1618
1619/*-------------------------------------------------------------------------*/
1620
1621#if defined(CONFIG_OF)
1622static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1623                           struct device_node *nc)
1624{
1625        u32 value;
1626        int rc;
1627
1628        /* Mode (clock phase/polarity/etc.) */
1629        if (of_property_read_bool(nc, "spi-cpha"))
1630                spi->mode |= SPI_CPHA;
1631        if (of_property_read_bool(nc, "spi-cpol"))
1632                spi->mode |= SPI_CPOL;
1633        if (of_property_read_bool(nc, "spi-3wire"))
1634                spi->mode |= SPI_3WIRE;
1635        if (of_property_read_bool(nc, "spi-lsb-first"))
1636                spi->mode |= SPI_LSB_FIRST;
1637
1638        /*
1639         * For descriptors associated with the device, polarity inversion is
1640         * handled in the gpiolib, so all chip selects are "active high" in
1641 * the logical sense; the gpiolib will invert the line if need be.
1642         */
1643        if (ctlr->use_gpio_descriptors)
1644                spi->mode |= SPI_CS_HIGH;
1645        else if (of_property_read_bool(nc, "spi-cs-high"))
1646                spi->mode |= SPI_CS_HIGH;
1647
1648        /* Device DUAL/QUAD mode */
1649        if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1650                switch (value) {
1651                case 1:
1652                        break;
1653                case 2:
1654                        spi->mode |= SPI_TX_DUAL;
1655                        break;
1656                case 4:
1657                        spi->mode |= SPI_TX_QUAD;
1658                        break;
1659                case 8:
1660                        spi->mode |= SPI_TX_OCTAL;
1661                        break;
1662                default:
1663                        dev_warn(&ctlr->dev,
1664                                "spi-tx-bus-width %d not supported\n",
1665                                value);
1666                        break;
1667                }
1668        }
1669
1670        if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1671                switch (value) {
1672                case 1:
1673                        break;
1674                case 2:
1675                        spi->mode |= SPI_RX_DUAL;
1676                        break;
1677                case 4:
1678                        spi->mode |= SPI_RX_QUAD;
1679                        break;
1680                case 8:
1681                        spi->mode |= SPI_RX_OCTAL;
1682                        break;
1683                default:
1684                        dev_warn(&ctlr->dev,
1685                                "spi-rx-bus-width %d not supported\n",
1686                                value);
1687                        break;
1688                }
1689        }
1690
1691        if (spi_controller_is_slave(ctlr)) {
1692                if (!of_node_name_eq(nc, "slave")) {
1693                        dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1694                                nc);
1695                        return -EINVAL;
1696                }
1697                return 0;
1698        }
1699
1700        /* Device address */
1701        rc = of_property_read_u32(nc, "reg", &value);
1702        if (rc) {
1703                dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
1704                        nc, rc);
1705                return rc;
1706        }
1707        spi->chip_select = value;
1708
1709        /* Device speed */
1710        rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1711        if (rc) {
1712                dev_err(&ctlr->dev,
1713                        "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
1714                return rc;
1715        }
1716        spi->max_speed_hz = value;
1717
1718        return 0;
1719}
1720
1721static struct spi_device *
1722of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
1723{
1724        struct spi_device *spi;
1725        int rc;
1726
1727        /* Alloc an spi_device */
1728        spi = spi_alloc_device(ctlr);
1729        if (!spi) {
1730                dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
1731                rc = -ENOMEM;
1732                goto err_out;
1733        }
1734
1735        /* Select device driver */
1736        rc = of_modalias_node(nc, spi->modalias,
1737                                sizeof(spi->modalias));
1738        if (rc < 0) {
1739                dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
1740                goto err_out;
1741        }
1742
1743        rc = of_spi_parse_dt(ctlr, spi, nc);
1744        if (rc)
1745                goto err_out;
1746
1747        /* Store a pointer to the node in the device structure */
1748        of_node_get(nc);
1749        spi->dev.of_node = nc;
1750
1751        /* Register the new device */
1752        rc = spi_add_device(spi);
1753        if (rc) {
1754                dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
1755                goto err_of_node_put;
1756        }
1757
1758        return spi;
1759
1760err_of_node_put:
1761        of_node_put(nc);
1762err_out:
1763        spi_dev_put(spi);
1764        return ERR_PTR(rc);
1765}
1766
1767/**
1768 * of_register_spi_devices() - Register child devices onto the SPI bus
1769 * @ctlr:       Pointer to spi_controller device
1770 *
1771 * Registers an spi_device for each child node of the controller node that
1772 * represents a valid SPI slave.
1773 */
1774static void of_register_spi_devices(struct spi_controller *ctlr)
1775{
1776        struct spi_device *spi;
1777        struct device_node *nc;
1778
1779        if (!ctlr->dev.of_node)
1780                return;
1781
1782        for_each_available_child_of_node(ctlr->dev.of_node, nc) {
1783                if (of_node_test_and_set_flag(nc, OF_POPULATED))
1784                        continue;
1785                spi = of_register_spi_device(ctlr, nc);
1786                if (IS_ERR(spi)) {
1787                        dev_warn(&ctlr->dev,
1788                                 "Failed to create SPI device for %pOF\n", nc);
1789                        of_node_clear_flag(nc, OF_POPULATED);
1790                }
1791        }
1792}
1793#else
1794static void of_register_spi_devices(struct spi_controller *ctlr) { }
1795#endif
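
/*
 * For reference, a device tree child node that of_spi_parse_dt() above
 * would accept looks roughly like this illustrative fragment (values
 * are made up; "jedec,spi-nor" is just one example binding):
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <20000000>;
 *			spi-cpol;
 *			spi-cpha;
 *		};
 *	};
 */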
1796
1797#ifdef CONFIG_ACPI
1798static void acpi_spi_parse_apple_properties(struct spi_device *spi)
1799{
1800        struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
1801        const union acpi_object *obj;
1802
1803        if (!x86_apple_machine)
1804                return;
1805
1806        if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
1807            && obj->buffer.length >= 4)
1808                spi->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
1809
1810        if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
1811            && obj->buffer.length == 8)
1812                spi->bits_per_word = *(u64 *)obj->buffer.pointer;
1813
1814        if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
1815            && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
1816                spi->mode |= SPI_LSB_FIRST;
1817
1818        if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
1819            && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1820                spi->mode |= SPI_CPOL;
1821
1822        if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
1823            && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1824                spi->mode |= SPI_CPHA;
1825}
1826
1827static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1828{
1829        struct spi_device *spi = data;
1830        struct spi_controller *ctlr = spi->controller;
1831
1832        if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1833                struct acpi_resource_spi_serialbus *sb;
1834
1835                sb = &ares->data.spi_serial_bus;
1836                if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1837                        /*
1838                         * ACPI DeviceSelection numbering is handled by the
1839                         * host controller driver in Windows and can vary
1840                         * from driver to driver. In Linux we always expect
1841                         * 0 .. max - 1 so we need to ask the driver to
1842                         * translate between the two schemes.
1843                         */
1844                        if (ctlr->fw_translate_cs) {
1845                                int cs = ctlr->fw_translate_cs(ctlr,
1846                                                sb->device_selection);
1847                                if (cs < 0)
1848                                        return cs;
1849                                spi->chip_select = cs;
1850                        } else {
1851                                spi->chip_select = sb->device_selection;
1852                        }
1853
1854                        spi->max_speed_hz = sb->connection_speed;
1855
1856                        if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1857                                spi->mode |= SPI_CPHA;
1858                        if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1859                                spi->mode |= SPI_CPOL;
1860                        if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1861                                spi->mode |= SPI_CS_HIGH;
1862                }
1863        } else if (spi->irq < 0) {
1864                struct resource r;
1865
1866                if (acpi_dev_resource_interrupt(ares, 0, &r))
1867                        spi->irq = r.start;
1868        }
1869
1870        /* Always tell the ACPI core to skip this resource */
1871        return 1;
1872}
1873
1874static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
1875                                            struct acpi_device *adev)
1876{
1877        struct list_head resource_list;
1878        struct spi_device *spi;
1879        int ret;
1880
1881        if (acpi_bus_get_status(adev) || !adev->status.present ||
1882            acpi_device_enumerated(adev))
1883                return AE_OK;
1884
1885        spi = spi_alloc_device(ctlr);
1886        if (!spi) {
1887                dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
1888                        dev_name(&adev->dev));
1889                return AE_NO_MEMORY;
1890        }
1891
1892        ACPI_COMPANION_SET(&spi->dev, adev);
1893        spi->irq = -1;
1894
1895        INIT_LIST_HEAD(&resource_list);
1896        ret = acpi_dev_get_resources(adev, &resource_list,
1897                                     acpi_spi_add_resource, spi);
1898        acpi_dev_free_resource_list(&resource_list);
1899
1900        acpi_spi_parse_apple_properties(spi);
1901
1902        if (ret < 0 || !spi->max_speed_hz) {
1903                spi_dev_put(spi);
1904                return AE_OK;
1905        }
1906
1907        acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
1908                          sizeof(spi->modalias));
1909
1910        if (spi->irq < 0)
1911                spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1912
1913        acpi_device_set_enumerated(adev);
1914
1915        adev->power.flags.ignore_parent = true;
1916        if (spi_add_device(spi)) {
1917                adev->power.flags.ignore_parent = false;
1918                dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
1919                        dev_name(&adev->dev));
1920                spi_dev_put(spi);
1921        }
1922
1923        return AE_OK;
1924}
1925
1926static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1927                                       void *data, void **return_value)
1928{
1929        struct spi_controller *ctlr = data;
1930        struct acpi_device *adev;
1931
1932        if (acpi_bus_get_device(handle, &adev))
1933                return AE_OK;
1934
1935        return acpi_register_spi_device(ctlr, adev);
1936}
1937
1938static void acpi_register_spi_devices(struct spi_controller *ctlr)
1939{
1940        acpi_status status;
1941        acpi_handle handle;
1942
1943        handle = ACPI_HANDLE(ctlr->dev.parent);
1944        if (!handle)
1945                return;
1946
1947        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1948                                     acpi_spi_add_device, NULL, ctlr, NULL);
1949        if (ACPI_FAILURE(status))
1950                dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
1951}
1952#else
1953static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
1954#endif /* CONFIG_ACPI */
1955
1956static void spi_controller_release(struct device *dev)
1957{
1958        struct spi_controller *ctlr;
1959
1960        ctlr = container_of(dev, struct spi_controller, dev);
1961        kfree(ctlr);
1962}
1963
1964static struct class spi_master_class = {
1965        .name           = "spi_master",
1966        .owner          = THIS_MODULE,
1967        .dev_release    = spi_controller_release,
1968        .dev_groups     = spi_master_groups,
1969};
1970
1971#ifdef CONFIG_SPI_SLAVE
1972/**
1973 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
1974 *                   controller
1975 * @spi: device used for the current transfer
1976 */
1977int spi_slave_abort(struct spi_device *spi)
1978{
1979        struct spi_controller *ctlr = spi->controller;
1980
1981        if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
1982                return ctlr->slave_abort(ctlr);
1983
1984        return -ENOTSUPP;
1985}
1986EXPORT_SYMBOL_GPL(spi_slave_abort);
1987
1988static int match_true(struct device *dev, void *data)
1989{
1990        return 1;
1991}
1992
1993static ssize_t spi_slave_show(struct device *dev,
1994                              struct device_attribute *attr, char *buf)
1995{
1996        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1997                                                   dev);
1998        struct device *child;
1999
2000        child = device_find_child(&ctlr->dev, NULL, match_true);
2001        return sprintf(buf, "%s\n",
2002                       child ? to_spi_device(child)->modalias : NULL);
2003}
2004
2005static ssize_t spi_slave_store(struct device *dev,
2006                               struct device_attribute *attr, const char *buf,
2007                               size_t count)
2008{
2009        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2010                                                   dev);
2011        struct spi_device *spi;
2012        struct device *child;
2013        char name[32];
2014        int rc;
2015
2016        rc = sscanf(buf, "%31s", name);
2017        if (rc != 1 || !name[0])
2018                return -EINVAL;
2019
2020        child = device_find_child(&ctlr->dev, NULL, match_true);
2021        if (child) {
2022                /* Remove registered slave */
2023                device_unregister(child);
2024                put_device(child);
2025        }
2026
2027        if (strcmp(name, "(null)")) {
2028                /* Register new slave */
2029                spi = spi_alloc_device(ctlr);
2030                if (!spi)
2031                        return -ENOMEM;
2032
2033                strlcpy(spi->modalias, name, sizeof(spi->modalias));
2034
2035                rc = spi_add_device(spi);
2036                if (rc) {
2037                        spi_dev_put(spi);
2038                        return rc;
2039                }
2040        }
2041
2042        return count;
2043}
2044
2045static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
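
/*
 * From userspace, a protocol handler can be bound to or removed from a
 * slave controller via this attribute. An illustrative shell session
 * (the bus number "spi0" depends on the system; "spi-slave-time" is
 * one in-tree sample protocol handler):
 *
 *	# echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	# echo "(null)" > /sys/class/spi_slave/spi0/slave
 */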
2046
2047static struct attribute *spi_slave_attrs[] = {
2048        &dev_attr_slave.attr,
2049        NULL,
2050};
2051
2052static const struct attribute_group spi_slave_group = {
2053        .attrs = spi_slave_attrs,
2054};
2055
2056static const struct attribute_group *spi_slave_groups[] = {
2057        &spi_controller_statistics_group,
2058        &spi_slave_group,
2059        NULL,
2060};
2061
2062static struct class spi_slave_class = {
2063        .name           = "spi_slave",
2064        .owner          = THIS_MODULE,
2065        .dev_release    = spi_controller_release,
2066        .dev_groups     = spi_slave_groups,
2067};
2068#else
2069extern struct class spi_slave_class;    /* dummy */
2070#endif
2071
2072/**
2073 * __spi_alloc_controller - allocate an SPI master or slave controller
2074 * @dev: the controller, possibly using the platform_bus
2075 * @size: how much zeroed driver-private data to allocate; the pointer to this
2076 *      memory is in the driver_data field of the returned device,
2077 *      accessible with spi_controller_get_devdata().
2078 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2079 *      slave (true) controller
2080 * Context: can sleep
2081 *
2082 * This call is used only by SPI controller drivers, which are the
2083 * only ones directly touching chip registers.  It's how they allocate
2084 * an spi_controller structure, prior to calling spi_register_controller().
2085 *
2086 * This must be called from context that can sleep.
2087 *
2088 * The caller is responsible for assigning the bus number and initializing the
2089 * controller's methods before calling spi_register_controller(); and (after
2090 * errors adding the device) calling spi_controller_put() to prevent a memory
2091 * leak.
2092 *
2093 * Return: the SPI controller structure on success, else NULL.
2094 */
2095struct spi_controller *__spi_alloc_controller(struct device *dev,
2096                                              unsigned int size, bool slave)
2097{
2098        struct spi_controller   *ctlr;
2099
2100        if (!dev)
2101                return NULL;
2102
2103        ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
2104        if (!ctlr)
2105                return NULL;
2106
2107        device_initialize(&ctlr->dev);
2108        ctlr->bus_num = -1;
2109        ctlr->num_chipselect = 1;
2110        ctlr->slave = slave;
2111        if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2112                ctlr->dev.class = &spi_slave_class;
2113        else
2114                ctlr->dev.class = &spi_master_class;
2115        ctlr->dev.parent = dev;
2116        pm_suspend_ignore_children(&ctlr->dev, true);
2117        spi_controller_set_devdata(ctlr, &ctlr[1]);
2118
2119        return ctlr;
2120}
2121EXPORT_SYMBOL_GPL(__spi_alloc_controller);
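
/*
 * Illustrative sketch (not part of this file): a master driver's
 * probe() commonly allocates the controller together with its private
 * state via the spi_alloc_master() wrapper; "struct foo_spi" is
 * hypothetical:
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	foo = spi_controller_get_devdata(ctlr);
 *	foo->ctlr = ctlr;
 */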
2122
2123#ifdef CONFIG_OF
2124static int of_spi_register_master(struct spi_controller *ctlr)
2125{
2126        int nb, i, *cs;
2127        struct device_node *np = ctlr->dev.of_node;
2128
2129        if (!np)
2130                return 0;
2131
2132        nb = of_gpio_named_count(np, "cs-gpios");
2133        ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2134
2135        /* Return error only for an incorrectly formed cs-gpios property */
2136        if (nb == 0 || nb == -ENOENT)
2137                return 0;
2138        else if (nb < 0)
2139                return nb;
2140
2141        cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2142                          GFP_KERNEL);
2143        ctlr->cs_gpios = cs;
2144
2145        if (!ctlr->cs_gpios)
2146                return -ENOMEM;
2147
2148        for (i = 0; i < ctlr->num_chipselect; i++)
2149                cs[i] = -ENOENT;
2150
2151        for (i = 0; i < nb; i++)
2152                cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2153
2154        return 0;
2155}
2156#else
2157static int of_spi_register_master(struct spi_controller *ctlr)
2158{
2159        return 0;
2160}
2161#endif
2162
2163/**
2164 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2165 * @ctlr: The SPI master to grab GPIO descriptors for
2166 */
2167static int spi_get_gpio_descs(struct spi_controller *ctlr)
2168{
2169        int nb, i;
2170        struct gpio_desc **cs;
2171        struct device *dev = &ctlr->dev;
2172
2173        nb = gpiod_count(dev, "cs");
2174        ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2175
2176        /* No GPIOs at all is fine, else return the error */
2177        if (nb == 0 || nb == -ENOENT)
2178                return 0;
2179        else if (nb < 0)
2180                return nb;
2181
2182        cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2183                          GFP_KERNEL);
2184        if (!cs)
2185                return -ENOMEM;
2186        ctlr->cs_gpiods = cs;
2187
2188        for (i = 0; i < nb; i++) {
2189                /*
2190                 * Most chipselects are active low; the inverted
2191                 * semantics are handled by special quirks in gpiolib,
2192                 * so initializing them to GPIOD_OUT_LOW here means
2193                 * "unasserted"; in most cases this will drive the physical
2194                 * line high.
2195                 */
2196                cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2197                                                      GPIOD_OUT_LOW);
2198
2199                if (cs[i]) {
2200                        /*
2201                         * If we find a CS GPIO, name it after the device and
2202                         * chip select line.
2203                         */
2204                        char *gpioname;
2205
2206                        gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2207                                                  dev_name(dev), i);
2208                        if (!gpioname)
2209                                return -ENOMEM;
2210                        gpiod_set_consumer_name(cs[i], gpioname);
2211                }
2212        }
2213
2214        return 0;
2215}
2216
2217static int spi_controller_check_ops(struct spi_controller *ctlr)
2218{
2219        /*
2220         * The controller may implement only the high-level SPI-memory-like
2221         * operations if it does not support regular SPI transfers, and this is
2222         * a valid use case.
2223         * If ->mem_ops is NULL, we request that at least one of the
2224         * ->transfer_xxx() methods be implemented.
2225         */
2226        if (ctlr->mem_ops) {
2227                if (!ctlr->mem_ops->exec_op)
2228                        return -EINVAL;
2229        } else if (!ctlr->transfer && !ctlr->transfer_one &&
2230                   !ctlr->transfer_one_message) {
2231                return -EINVAL;
2232        }
2233
2234        return 0;
2235}
2236
2237/**
2238 * spi_register_controller - register SPI master or slave controller
2239 * @ctlr: initialized master, originally from spi_alloc_master() or
2240 *      spi_alloc_slave()
2241 * Context: can sleep
2242 *
2243 * SPI controllers connect to their drivers using some non-SPI bus,
2244 * such as the platform bus.  The final stage of probe() in that code
2245 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2246 *
2247 * SPI controllers use board specific (often SOC specific) bus numbers,
2248 * and board-specific addressing for SPI devices combines those numbers
2249 * with chip select numbers.  Since SPI does not directly support dynamic
2250 * device identification, boards need configuration tables telling which
2251 * chip is at which address.
2252 *
2253 * This must be called from context that can sleep.  It returns zero on
2254 * success, else a negative error code (dropping the controller's refcount).
2255 * After a successful return, the caller is responsible for calling
2256 * spi_unregister_controller().
2257 *
2258 * Return: zero on success, else a negative error code.
2259 */
2260int spi_register_controller(struct spi_controller *ctlr)
2261{
2262        struct device           *dev = ctlr->dev.parent;
2263        struct boardinfo        *bi;
2264        int                     status = -ENODEV;
2265        int                     id, first_dynamic;
2266
2267        if (!dev)
2268                return -ENODEV;
2269
2270        /*
2271         * Make sure all necessary hooks are implemented before registering
2272         * the SPI controller.
2273         */
2274        status = spi_controller_check_ops(ctlr);
2275        if (status)
2276                return status;
2277
2278        if (!spi_controller_is_slave(ctlr)) {
2279                if (ctlr->use_gpio_descriptors) {
2280                        status = spi_get_gpio_descs(ctlr);
2281                        if (status)
2282                                return status;
2283                        /*
2284                         * A controller using GPIO descriptors always
2285                         * supports SPI_CS_HIGH if need be.
2286                         */
2287                        ctlr->mode_bits |= SPI_CS_HIGH;
2288                } else {
2289                        /* Legacy code path for GPIOs from DT */
2290                        status = of_spi_register_master(ctlr);
2291                        if (status)
2292                                return status;
2293                }
2294        }
2295
2296        /* even if it's just one always-selected device, there must
2297         * be at least one chipselect
2298         */
2299        if (ctlr->num_chipselect == 0)
2300                return -EINVAL;
2301        if (ctlr->bus_num >= 0) {
2302                /* devices with a fixed bus num must check-in with the num */
2303                mutex_lock(&board_lock);
2304                id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2305                        ctlr->bus_num + 1, GFP_KERNEL);
2306                mutex_unlock(&board_lock);
2307                if (WARN(id < 0, "couldn't get idr"))
2308                        return id == -ENOSPC ? -EBUSY : id;
2309                ctlr->bus_num = id;
2310        } else if (ctlr->dev.of_node) {
2311                /* allocate dynamic bus number using Linux idr */
2312                id = of_alias_get_id(ctlr->dev.of_node, "spi");
2313                if (id >= 0) {
2314                        ctlr->bus_num = id;
2315                        mutex_lock(&board_lock);
2316                        id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2317                                       ctlr->bus_num + 1, GFP_KERNEL);
2318                        mutex_unlock(&board_lock);
2319                        if (WARN(id < 0, "couldn't get idr"))
2320                                return id == -ENOSPC ? -EBUSY : id;
2321                }
2322        }
2323        if (ctlr->bus_num < 0) {
2324                first_dynamic = of_alias_get_highest_id("spi");
2325                if (first_dynamic < 0)
2326                        first_dynamic = 0;
2327                else
2328                        first_dynamic++;
2329
2330                mutex_lock(&board_lock);
2331                id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2332                               0, GFP_KERNEL);
2333                mutex_unlock(&board_lock);
2334                if (WARN(id < 0, "couldn't get idr"))
2335                        return id;
2336                ctlr->bus_num = id;
2337        }
2338        INIT_LIST_HEAD(&ctlr->queue);
2339        spin_lock_init(&ctlr->queue_lock);
2340        spin_lock_init(&ctlr->bus_lock_spinlock);
2341        mutex_init(&ctlr->bus_lock_mutex);
2342        mutex_init(&ctlr->io_mutex);
2343        ctlr->bus_lock_flag = 0;
2344        init_completion(&ctlr->xfer_completion);
2345        if (!ctlr->max_dma_len)
2346                ctlr->max_dma_len = INT_MAX;
2347
2348        /* register the device, then userspace will see it.
2349         * registration fails if the bus ID is in use.
2350         */
2351        dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2352        status = device_add(&ctlr->dev);
2353        if (status < 0) {
2354                /* free bus id */
2355                mutex_lock(&board_lock);
2356                idr_remove(&spi_master_idr, ctlr->bus_num);
2357                mutex_unlock(&board_lock);
2358                goto done;
2359        }
2360        dev_dbg(dev, "registered %s %s\n",
2361                        spi_controller_is_slave(ctlr) ? "slave" : "master",
2362                        dev_name(&ctlr->dev));
2363
2364        /*
2365         * If we're using a queued driver, start the queue. Note that we don't
2366         * need the queueing logic if the driver only supports high-level
2367         * memory operations.
2368         */
2369        if (ctlr->transfer) {
2370                dev_info(dev, "controller is unqueued, this is deprecated\n");
2371        } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2372                status = spi_controller_initialize_queue(ctlr);
2373                if (status) {
2374                        device_del(&ctlr->dev);
2375                        /* free bus id */
2376                        mutex_lock(&board_lock);
2377                        idr_remove(&spi_master_idr, ctlr->bus_num);
2378                        mutex_unlock(&board_lock);
2379                        goto done;
2380                }
2381        }
2382        /* add statistics */
2383        spin_lock_init(&ctlr->statistics.lock);
2384
2385        mutex_lock(&board_lock);
2386        list_add_tail(&ctlr->list, &spi_controller_list);
2387        list_for_each_entry(bi, &board_list, list)
2388                spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2389        mutex_unlock(&board_lock);
2390
2391        /* Register devices from the device tree and ACPI */
2392        of_register_spi_devices(ctlr);
2393        acpi_register_spi_devices(ctlr);
2394done:
2395        return status;
2396}
2397EXPORT_SYMBOL_GPL(spi_register_controller);
2398
2399static void devm_spi_unregister(struct device *dev, void *res)
2400{
2401        spi_unregister_controller(*(struct spi_controller **)res);
2402}
2403
2404/**
2405 * devm_spi_register_controller - register managed SPI master or slave
2406 *      controller
2407 * @dev:    device managing SPI controller
2408 * @ctlr: initialized controller, originally from spi_alloc_master() or
2409 *      spi_alloc_slave()
2410 * Context: can sleep
2411 *
2412 * Register an SPI controller as with spi_register_controller(), which will
2413 * automatically be unregistered and freed.
2414 *
2415 * Return: zero on success, else a negative error code.
2416 */
2417int devm_spi_register_controller(struct device *dev,
2418                                 struct spi_controller *ctlr)
2419{
2420        struct spi_controller **ptr;
2421        int ret;
2422
2423        ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2424        if (!ptr)
2425                return -ENOMEM;
2426
2427        ret = spi_register_controller(ctlr);
2428        if (!ret) {
2429                *ptr = ctlr;
2430                devres_add(dev, ptr);
2431        } else {
2432                devres_free(ptr);
2433        }
2434
2435        return ret;
2436}
2437EXPORT_SYMBOL_GPL(devm_spi_register_controller);
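
/*
 * Illustrative sketch (not part of this file): with the managed
 * variant, the error and remove paths of a typical probe() stay
 * trivial; the "foo" names and chipselect count are hypothetical:
 *
 *	static int foo_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *		if (!ctlr)
 *			return -ENOMEM;
 *		ctlr->num_chipselect = 4;
 *		ctlr->transfer_one = foo_spi_transfer_one;
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */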
2438
2439static int __unregister(struct device *dev, void *null)
2440{
2441        spi_unregister_device(to_spi_device(dev));
2442        return 0;
2443}
2444
2445/**
2446 * spi_unregister_controller - unregister SPI master or slave controller
2447 * @ctlr: the controller being unregistered
2448 * Context: can sleep
2449 *
2450 * This call is used only by SPI controller drivers, which are the
2451 * only ones directly touching chip registers.
2452 *
2453 * This must be called from context that can sleep.
2454 *
2455 * Note that this function also drops a reference to the controller.
2456 */
2457void spi_unregister_controller(struct spi_controller *ctlr)
2458{
2459        struct spi_controller *found;
2460        int id = ctlr->bus_num;
2461        int dummy;
2462
2463        /* First make sure that this controller was ever added */
2464        mutex_lock(&board_lock);
2465        found = idr_find(&spi_master_idr, id);
2466        mutex_unlock(&board_lock);
2467        if (ctlr->queued) {
2468                if (spi_destroy_queue(ctlr))
2469                        dev_err(&ctlr->dev, "queue remove failed\n");
2470        }
2471        mutex_lock(&board_lock);
2472        list_del(&ctlr->list);
2473        mutex_unlock(&board_lock);
2474
2475        dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
2476        device_unregister(&ctlr->dev);
2477        /* free bus id */
2478        mutex_lock(&board_lock);
2479        if (found == ctlr)
2480                idr_remove(&spi_master_idr, id);
2481        mutex_unlock(&board_lock);
2482}
2483EXPORT_SYMBOL_GPL(spi_unregister_controller);
2484
2485int spi_controller_suspend(struct spi_controller *ctlr)
2486{
2487        int ret;
2488
2489        /* Basically no-ops for non-queued controllers */
2490        if (!ctlr->queued)
2491                return 0;
2492
2493        ret = spi_stop_queue(ctlr);
2494        if (ret)
2495                dev_err(&ctlr->dev, "queue stop failed\n");
2496
2497        return ret;
2498}
2499EXPORT_SYMBOL_GPL(spi_controller_suspend);
2500
2501int spi_controller_resume(struct spi_controller *ctlr)
2502{
2503        int ret;
2504
2505        if (!ctlr->queued)
2506                return 0;
2507
2508        ret = spi_start_queue(ctlr);
2509        if (ret)
2510                dev_err(&ctlr->dev, "queue restart failed\n");
2511
2512        return ret;
2513}
2514EXPORT_SYMBOL_GPL(spi_controller_resume);
2515
2516static int __spi_controller_match(struct device *dev, const void *data)
2517{
2518        struct spi_controller *ctlr;
2519        const u16 *bus_num = data;
2520
2521        ctlr = container_of(dev, struct spi_controller, dev);
2522        return ctlr->bus_num == *bus_num;
2523}
2524
2525/**
2526 * spi_busnum_to_master - look up master associated with bus_num
2527 * @bus_num: the master's bus number
2528 * Context: can sleep
2529 *
2530 * This call may be used with devices that are registered after
2531 * arch init time.  It returns a refcounted pointer to the relevant
2532 * spi_controller (which the caller must release), or NULL if there is
2533 * no such master registered.
2534 *
2535 * Return: the SPI master structure on success, else NULL.
2536 */
2537struct spi_controller *spi_busnum_to_master(u16 bus_num)
2538{
2539        struct device           *dev;
2540        struct spi_controller   *ctlr = NULL;
2541
2542        dev = class_find_device(&spi_master_class, NULL, &bus_num,
2543                                __spi_controller_match);
2544        if (dev)
2545                ctlr = container_of(dev, struct spi_controller, dev);
2546        /* reference was taken in class_find_device() */
2547        return ctlr;
2548}
2549EXPORT_SYMBOL_GPL(spi_busnum_to_master);
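
/*
 * Illustrative usage sketch: the reference returned by
 * spi_busnum_to_master() must be dropped with spi_controller_put()
 * once the caller is done with it:
 *
 *	ctlr = spi_busnum_to_master(0);
 *	if (ctlr) {
 *		...
 *		spi_controller_put(ctlr);
 *	}
 */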
2550
2551/*-------------------------------------------------------------------------*/
2552
2553/* Core methods for SPI resource management */
2554
2555/**
2556 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2557 *                 during the processing of a spi_message while using
2558 *                 spi_transfer_one
2559 * @spi:     the spi device for which we allocate memory
2560 * @release: the release code to execute for this resource
2561 * @size:    size to alloc and return
2562 * @gfp:     GFP allocation flags
2563 *
2564 * Return: the pointer to the allocated data, or NULL in case of failure
2565 *
2566 * This may get enhanced in the future to allocate from a memory pool
2567 * of the @spi_device or @spi_controller to avoid repeated allocations.
2568 */
2569void *spi_res_alloc(struct spi_device *spi,
2570                    spi_res_release_t release,
2571                    size_t size, gfp_t gfp)
2572{
2573        struct spi_res *sres;
2574
2575        sres = kzalloc(sizeof(*sres) + size, gfp);
2576        if (!sres)
2577                return NULL;
2578
2579        INIT_LIST_HEAD(&sres->entry);
2580        sres->release = release;
2581
2582        return sres->data;
2583}
2584EXPORT_SYMBOL_GPL(spi_res_alloc);
2585
2586/**
2587 * spi_res_free - free an spi resource
2588 * @res: pointer to the custom data of a resource
2589 * Frees a resource previously allocated with spi_res_alloc().
2590 */
2591void spi_res_free(void *res)
2592{
2593        struct spi_res *sres = container_of(res, struct spi_res, data);
2594
2595        if (!res)
2596                return;
2597
2598        WARN_ON(!list_empty(&sres->entry));
2599        kfree(sres);
2600}
2601EXPORT_SYMBOL_GPL(spi_res_free);
2602
2603/**
2604 * spi_res_add - add a spi_res to the spi_message
2605 * @message: the spi message
2606 * @res:     the spi_resource
2607 */
2608void spi_res_add(struct spi_message *message, void *res)
2609{
2610        struct spi_res *sres = container_of(res, struct spi_res, data);
2611
2612        WARN_ON(!list_empty(&sres->entry));
2613        list_add_tail(&sres->entry, &message->resources);
2614}
2615EXPORT_SYMBOL_GPL(spi_res_add);
2616
2617/**
2618 * spi_res_release - release all spi resources for this message
2619 * @ctlr:  the @spi_controller
2620 * @message: the @spi_message
2621 */
2622void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2623{
2624        struct spi_res *res;
2625
2626        while (!list_empty(&message->resources)) {
2627                res = list_last_entry(&message->resources,
2628                                      struct spi_res, entry);
2629
2630                if (res->release)
2631                        res->release(ctlr, message, res->data);
2632
2633                list_del(&res->entry);
2634
2635                kfree(res);
2636        }
2637}
2638EXPORT_SYMBOL_GPL(spi_res_release);
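
/*
 * Illustrative sketch of the spi_res life cycle (not part of this
 * file): the "foo" names are hypothetical; the release callback runs
 * when spi_res_release() tears the message down:
 *
 *	static void foo_release(struct spi_controller *ctlr,
 *				struct spi_message *msg, void *res)
 *	{
 *		foo_undo(res);
 *	}
 *
 *	data = spi_res_alloc(spi, foo_release, sizeof(*data), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	spi_res_add(msg, data);
 */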
2639
2640/*-------------------------------------------------------------------------*/
2641
2642/* Core methods for spi_message alterations */
2643
2644static void __spi_replace_transfers_release(struct spi_controller *ctlr,
2645                                            struct spi_message *msg,
2646                                            void *res)
2647{
2648        struct spi_replaced_transfers *rxfer = res;
2649        size_t i;
2650
2651        /* call extra callback if requested */
2652        if (rxfer->release)
2653                rxfer->release(ctlr, msg, res);
2654
2655        /* insert replaced transfers back into the message */
2656        list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2657
2658        /* remove the formerly inserted entries */
2659        for (i = 0; i < rxfer->inserted; i++)
2660                list_del(&rxfer->inserted_transfers[i].transfer_list);
2661}
2662
2663/**
2664 * spi_replace_transfers - replace transfers with several transfers
2665 *                         and register change with spi_message.resources
2666 * @msg:           the spi_message we work upon
2667 * @xfer_first:    the first spi_transfer we want to replace
2668 * @remove:        number of transfers to remove
2669 * @insert:        the number of transfers we want to insert instead
2670 * @release:       extra release code necessary in some circumstances
2671 * @extradatasize: extra data to allocate (with alignment guarantees
2672 *                 of struct @spi_transfer)
2673 * @gfp:           gfp flags
2674 *
2675 * Returns: pointer to @spi_replaced_transfers,
2676 *          or PTR_ERR(...) in case of errors.
2677 */
2678struct spi_replaced_transfers *spi_replace_transfers(
2679        struct spi_message *msg,
2680        struct spi_transfer *xfer_first,
2681        size_t remove,
2682        size_t insert,
2683        spi_replaced_release_t release,
2684        size_t extradatasize,
2685        gfp_t gfp)
2686{
2687        struct spi_replaced_transfers *rxfer;
2688        struct spi_transfer *xfer;
2689        size_t i;
2690
2691        /* allocate the structure using spi_res */
2692        rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2693                              insert * sizeof(struct spi_transfer)
2694                              + sizeof(struct spi_replaced_transfers)
2695                              + extradatasize,
2696                              gfp);
2697        if (!rxfer)
2698                return ERR_PTR(-ENOMEM);
2699
2700        /* the release code to invoke before running the generic release */
2701        rxfer->release = release;
2702
2703        /* assign extradata */
2704        if (extradatasize)
2705                rxfer->extradata =
2706                        &rxfer->inserted_transfers[insert];
2707
2708        /* init the replaced_transfers list */
2709        INIT_LIST_HEAD(&rxfer->replaced_transfers);
2710
2711        /* assign the list_entry after which we should reinsert
2712         * the @replaced_transfers - it may be spi_message.transfers!
2713         */
2714        rxfer->replaced_after = xfer_first->transfer_list.prev;
2715
2716        /* remove the requested number of transfers */
2717        for (i = 0; i < remove; i++) {
2718                /* if the entry after replaced_after is msg->transfers
2719                 * then we have been requested to remove more transfers
2720                 * than are in the list
2721                 */
2722                if (rxfer->replaced_after->next == &msg->transfers) {
2723                        dev_err(&msg->spi->dev,
2724                                "requested to remove more spi_transfers than are available\n");
2725                        /* insert replaced transfers back into the message */
2726                        list_splice(&rxfer->replaced_transfers,
2727                                    rxfer->replaced_after);
2728
2729                        /* free the spi_replace_transfer structure */
2730                        spi_res_free(rxfer);
2731
2732                        /* and return with an error */
2733                        return ERR_PTR(-EINVAL);
2734                }
2735
2736                /* remove the entry after replaced_after from list of
2737                 * transfers and add it to list of replaced_transfers
2738                 */
2739                list_move_tail(rxfer->replaced_after->next,
2740                               &rxfer->replaced_transfers);
2741        }
2742
2743        /* create copies of the given xfer with identical settings
2744         * based on the first transfer to get removed
2745         */
2746        for (i = 0; i < insert; i++) {
2747                /* we need to run in reverse order */
2748                xfer = &rxfer->inserted_transfers[insert - 1 - i];
2749
2750                /* copy all spi_transfer data */
2751                memcpy(xfer, xfer_first, sizeof(*xfer));
2752
2753                /* add to list */
2754                list_add(&xfer->transfer_list, rxfer->replaced_after);
2755
2756                /* clear cs_change and delay_usecs for all but the last */
2757                if (i) {
2758                        xfer->cs_change = false;
2759                        xfer->delay_usecs = 0;
2760                }
2761        }
2762
2763        /* set up inserted */
2764        rxfer->inserted = insert;
2765
2766        /* and register it with spi_res/spi_message */
2767        spi_res_add(msg, rxfer);
2768
2769        return rxfer;
2770}
2771EXPORT_SYMBOL_GPL(spi_replace_transfers);
2772
2773static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
2774                                        struct spi_message *msg,
2775                                        struct spi_transfer **xferp,
2776                                        size_t maxsize,
2777                                        gfp_t gfp)
2778{
2779        struct spi_transfer *xfer = *xferp, *xfers;
2780        struct spi_replaced_transfers *srt;
2781        size_t offset;
2782        size_t count, i;
2783
2784        /* warn once about the fact that we are splitting a transfer */
2785        dev_warn_once(&msg->spi->dev,
2786                      "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
2787                      xfer->len, maxsize);
2788
2789        /* calculate how many we have to replace */
2790        count = DIV_ROUND_UP(xfer->len, maxsize);
2791
2792        /* create replacement */
2793        srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2794        if (IS_ERR(srt))
2795                return PTR_ERR(srt);
2796        xfers = srt->inserted_transfers;
2797
2798        /* now handle each of those newly inserted spi_transfers
2799         * note that the replacement spi_transfers are all preset
2800         * to the same values as *xferp, so tx_buf, rx_buf and len
2801         * are all identical (as well as most others),
2802         * so we just have to fix up len and the pointers.
2803         *
2804         * this also includes support for the deprecated
2805         * spi_message.is_dma_mapped interface
2806         */
2807
2808        /* the first transfer just needs the length modified, so we
2809         * run it outside the loop
2810         */
2811        xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2812
2813        /* all the others need rx_buf/tx_buf also set */
2814        for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2815                /* update rx_buf, tx_buf and dma */
2816                if (xfers[i].rx_buf)
2817                        xfers[i].rx_buf += offset;
2818                if (xfers[i].rx_dma)
2819                        xfers[i].rx_dma += offset;
2820                if (xfers[i].tx_buf)
2821                        xfers[i].tx_buf += offset;
2822                if (xfers[i].tx_dma)
2823                        xfers[i].tx_dma += offset;
2824
2825                /* update length */
2826                xfers[i].len = min(maxsize, xfers[i].len - offset);
2827        }
2828
2829        /* we point xferp to the last entry we have inserted,
2830         * so that we skip the already-split transfers
2831         */
2832        *xferp = &xfers[count - 1];
2833
2834        /* increment statistics counters */
2835        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2836                                       transfers_split_maxsize);
2837        SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2838                                       transfers_split_maxsize);
2839
2840        return 0;
2841}
2842
2843/**
2844 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2845 *                              when an individual transfer exceeds a
2846 *                              certain size
2847 * @ctlr:    the @spi_controller for this transfer
2848 * @msg:   the @spi_message to transform
2849 * @maxsize:  the maximum length an individual transfer may have
2850 * @gfp: GFP allocation flags
2851 *
2852 * Return: status of transformation
2853 */
2854int spi_split_transfers_maxsize(struct spi_controller *ctlr,
2855                                struct spi_message *msg,
2856                                size_t maxsize,
2857                                gfp_t gfp)
2858{
2859        struct spi_transfer *xfer;
2860        int ret;
2861
2862        /* iterate over the transfer_list,
2863         * but note that xfer is advanced to the last transfer inserted
2864         * to avoid checking sizes again unnecessarily (also, xfer may
2865         * belong to a different list by the time the
2866         * replacement has happened)
2867         */
2868        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2869                if (xfer->len > maxsize) {
2870                        ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
2871                                                           maxsize, gfp);
2872                        if (ret)
2873                                return ret;
2874                }
2875        }
2876
2877        return 0;
2878}
2879EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
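
/*
 * Illustrative sketch (not part of this file): a controller driver
 * with a hardware FIFO limit might invoke the splitting from its
 * ->prepare_message() hook; FOO_MAX_XFER_LEN is hypothetical:
 *
 *	static int foo_spi_prepare_message(struct spi_controller *ctlr,
 *					   struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg,
 *						   FOO_MAX_XFER_LEN,
 *						   GFP_KERNEL);
 *	}
 */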
2880
2881/*-------------------------------------------------------------------------*/
2882
2883/* Core methods for SPI controller protocol drivers.  Some of the
2884 * other core methods are currently defined as inline functions.
2885 */
2886
2887static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2888                                        u8 bits_per_word)
2889{
2890        if (ctlr->bits_per_word_mask) {
2891                /* Only 32 bits fit in the mask */
2892                if (bits_per_word > 32)
2893                        return -EINVAL;
2894                if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
2895                        return -EINVAL;
2896        }
2897
2898        return 0;
2899}
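
/*
 * Controller drivers describe the word sizes they support with a
 * bitmask; an illustrative declaration for a controller handling 8 to
 * 16 bits per word (SPI_BPW_RANGE_MASK comes from <linux/spi/spi.h>):
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
 */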
2900
2901/**
2902 * spi_setup - setup SPI mode and clock rate
2903 * @spi: the device whose settings are being modified
2904 * Context: can sleep, and no requests are queued to the device
2905 *
2906 * SPI protocol drivers may need to update the transfer mode if the
2907 * device doesn't work with its default.  They may likewise need
2908 * to update clock rates or word sizes from initial values.  This function
2909 * changes those settings, and must be called from a context that can sleep.
2910 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2911 * effect the next time the device is selected and data is transferred to
2912 * or from it.  When this function returns, the spi device is deselected.
2913 *
2914 * Note that this call will fail if the protocol driver specifies an option
2915 * that the underlying controller or its driver does not support.  For
2916 * example, not all hardware supports wire transfers using nine bit words,
2917 * LSB-first wire encoding, or active-high chipselects.
2918 *
2919 * Return: zero on success, else a negative error code.
2920 */
2921int spi_setup(struct spi_device *spi)
2922{
2923        unsigned        bad_bits, ugly_bits;
2924        int             status;
2925
2926        /* check mode to prevent DUAL and QUAD from being set at the same time
2927         */
2928        if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2929                ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2930                dev_err(&spi->dev,
2931                "setup: cannot select dual and quad at the same time\n");
2932                return -EINVAL;
2933        }
2934        /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
2935         */
2936        if ((spi->mode & SPI_3WIRE) && (spi->mode &
2937                (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
2938                 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
2939                return -EINVAL;
2940        /* help drivers fail *cleanly* when they need options
2941         * that aren't supported with their current controller.
2942         * SPI_CS_WORD has a fallback software implementation,
2943         * so it is ignored here.
2944         */
2945        bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
2946        ugly_bits = bad_bits &
2947                    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
2948                     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
2949        if (ugly_bits) {
2950                dev_warn(&spi->dev,
2951                         "setup: ignoring unsupported mode bits %x\n",
2952                         ugly_bits);
2953                spi->mode &= ~ugly_bits;
2954                bad_bits &= ~ugly_bits;
2955        }
2956        if (bad_bits) {
2957                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2958                        bad_bits);
2959                return -EINVAL;
2960        }
2961
2962        if (!spi->bits_per_word)
2963                spi->bits_per_word = 8;
2964
2965        status = __spi_validate_bits_per_word(spi->controller,
2966                                              spi->bits_per_word);
2967        if (status)
2968                return status;
2969
2970        if (!spi->max_speed_hz)
2971                spi->max_speed_hz = spi->controller->max_speed_hz;
2972
2973        if (spi->controller->setup)
2974                status = spi->controller->setup(spi);
2975
2976        spi_set_cs(spi, false);
2977
2978        dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2979                        (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2980                        (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2981                        (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2982                        (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2983                        (spi->mode & SPI_LOOP) ? "loopback, " : "",
2984                        spi->bits_per_word, spi->max_speed_hz,
2985                        status);
2986
2987        return status;
2988}
2989EXPORT_SYMBOL_GPL(spi_setup);
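/*
 * Illustrative sketch (not part of this file): a typical protocol driver
 * adjusts the per-device settings in probe() and then calls spi_setup().
 * The probe function name and the values chosen are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;		// CPOL=1, CPHA=1
 *		spi->bits_per_word = 8;
 *		spi->max_speed_hz = 1000000;	// 1 MHz ceiling for this chip
 *		return spi_setup(spi);
 *	}
 */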
2990
2991static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2992{
2993        struct spi_controller *ctlr = spi->controller;
2994        struct spi_transfer *xfer;
2995        int w_size;
2996
2997        if (list_empty(&message->transfers))
2998                return -EINVAL;
2999
3000        /* If an SPI controller does not support toggling the CS line on each
3001         * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3002         * for the CS line, we can emulate the CS-per-word hardware function by
3003         * splitting transfers into one-word transfers and ensuring that
3004         * cs_change is set for each transfer.
3005         */
3006        if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3007                                          spi->cs_gpiod ||
3008                                          gpio_is_valid(spi->cs_gpio))) {
3009                size_t maxsize;
3010                int ret;
3011
3012                maxsize = (spi->bits_per_word + 7) / 8;
3013
3014                /* spi_split_transfers_maxsize() requires message->spi */
3015                message->spi = spi;
3016
3017                ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3018                                                  GFP_KERNEL);
3019                if (ret)
3020                        return ret;
3021
3022                list_for_each_entry(xfer, &message->transfers, transfer_list) {
3023                        /* don't change cs_change on the last entry in the list */
3024                        if (list_is_last(&xfer->transfer_list, &message->transfers))
3025                                break;
3026                        xfer->cs_change = 1;
3027                }
3028        }
3029
3030        /* Half-duplex links include original MicroWire, and ones with
3031         * only one data pin like SPI_3WIRE (switches direction) or where
3032         * either MOSI or MISO is missing.  They can also be caused by
3033         * software limitations.
3034         */
3035        if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3036            (spi->mode & SPI_3WIRE)) {
3037                unsigned flags = ctlr->flags;
3038
3039                list_for_each_entry(xfer, &message->transfers, transfer_list) {
3040                        if (xfer->rx_buf && xfer->tx_buf)
3041                                return -EINVAL;
3042                        if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3043                                return -EINVAL;
3044                        if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3045                                return -EINVAL;
3046                }
3047        }
3048
3049        /*
3050         * Set transfer bits_per_word and max speed as spi device default if
3051         * it is not set for this transfer.
3052         * Set transfer tx_nbits and rx_nbits as single transfer default
3053         * (SPI_NBITS_SINGLE) if it is not set for this transfer.
3054         * Ensure transfer word_delay is at least as long as that required by
3055         * the device itself.
3056         */
3057        message->frame_length = 0;
3058        list_for_each_entry(xfer, &message->transfers, transfer_list) {
3059                message->frame_length += xfer->len;
3060                if (!xfer->bits_per_word)
3061                        xfer->bits_per_word = spi->bits_per_word;
3062
3063                if (!xfer->speed_hz)
3064                        xfer->speed_hz = spi->max_speed_hz;
3065                if (!xfer->speed_hz)
3066                        xfer->speed_hz = ctlr->max_speed_hz;
3067
3068                if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3069                        xfer->speed_hz = ctlr->max_speed_hz;
3070
3071                if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3072                        return -EINVAL;
3073
3074                /*
3075                 * SPI transfer length should be multiple of SPI word size
3076                 * where SPI word size should be power-of-two multiple
3077                 */
3078                if (xfer->bits_per_word <= 8)
3079                        w_size = 1;
3080                else if (xfer->bits_per_word <= 16)
3081                        w_size = 2;
3082                else
3083                        w_size = 4;
3084
3085                /* No partial transfers accepted */
3086                if (xfer->len % w_size)
3087                        return -EINVAL;
3088
3089                if (xfer->speed_hz && ctlr->min_speed_hz &&
3090                    xfer->speed_hz < ctlr->min_speed_hz)
3091                        return -EINVAL;
3092
3093                if (xfer->tx_buf && !xfer->tx_nbits)
3094                        xfer->tx_nbits = SPI_NBITS_SINGLE;
3095                if (xfer->rx_buf && !xfer->rx_nbits)
3096                        xfer->rx_nbits = SPI_NBITS_SINGLE;
3097                /* Check transfer tx/rx_nbits:
3098                 * 1. check the value matches one of single, dual, or quad
3099                 * 2. check tx/rx_nbits match the mode in spi_device
3100                 */
3101                if (xfer->tx_buf) {
3102                        if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3103                                xfer->tx_nbits != SPI_NBITS_DUAL &&
3104                                xfer->tx_nbits != SPI_NBITS_QUAD)
3105                                return -EINVAL;
3106                        if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3107                                !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3108                                return -EINVAL;
3109                        if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3110                                !(spi->mode & SPI_TX_QUAD))
3111                                return -EINVAL;
3112                }
3113                /* check transfer rx_nbits */
3114                if (xfer->rx_buf) {
3115                        if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3116                                xfer->rx_nbits != SPI_NBITS_DUAL &&
3117                                xfer->rx_nbits != SPI_NBITS_QUAD)
3118                                return -EINVAL;
3119                        if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3120                                !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3121                                return -EINVAL;
3122                        if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3123                                !(spi->mode & SPI_RX_QUAD))
3124                                return -EINVAL;
3125                }
3126
3127                if (xfer->word_delay_usecs < spi->word_delay_usecs)
3128                        xfer->word_delay_usecs = spi->word_delay_usecs;
3129        }
3130
3131        message->status = -EINPROGRESS;
3132
3133        return 0;
3134}
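/*
 * Illustrative sketch (not part of this file): the SPI_CS_WORD emulation in
 * __spi_validate() above means a client can request per-word chip select
 * toggling even on controllers without hardware support for it.  "samples"
 * and "n_words" are hypothetical.
 *
 *	spi->mode |= SPI_CS_WORD;
 *	spi->bits_per_word = 16;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = samples,
 *		.len = n_words * 2,	// bytes; CS toggles after each word
 *	};
 *	ret = spi_sync_transfer(spi, &xfer, 1);
 */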
3135
3136static int __spi_async(struct spi_device *spi, struct spi_message *message)
3137{
3138        struct spi_controller *ctlr = spi->controller;
3139
3140        /*
3141         * Some controllers do not support doing regular SPI transfers. Return
3142         * -ENOTSUPP when this is the case.
3143         */
3144        if (!ctlr->transfer)
3145                return -ENOTSUPP;
3146
3147        message->spi = spi;
3148
3149        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3150        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3151
3152        trace_spi_message_submit(message);
3153
3154        return ctlr->transfer(spi, message);
3155}
3156
3157/**
3158 * spi_async - asynchronous SPI transfer
3159 * @spi: device with which data will be exchanged
3160 * @message: describes the data transfers, including completion callback
3161 * Context: any (irqs may be blocked, etc)
3162 *
3163 * This call may be used in_irq and other contexts which can't sleep,
3164 * as well as from task contexts which can sleep.
3165 *
3166 * The completion callback is invoked in a context which can't sleep.
3167 * Before that invocation, the value of message->status is undefined.
3168 * When the callback is issued, message->status holds either zero (to
3169 * indicate complete success) or a negative error code.  After that
3170 * callback returns, the driver which issued the transfer request may
3171 * deallocate the associated memory; it's no longer in use by any SPI
3172 * core or controller driver code.
3173 *
3174 * Note that although all messages to a spi_device are handled in
3175 * FIFO order, messages may go to different devices in other orders.
3176 * Some device might be higher priority, or have various "hard" access
3177 * time requirements, for example.
3178 *
3179 * On detection of any fault during the transfer, processing of
3180 * the entire message is aborted, and the device is deselected.
3181 * Until returning from the associated message completion callback,
3182 * no other spi_message queued to that device will be processed.
3183 * (This rule applies equally to all the synchronous transfer calls,
3184 * which are wrappers around this core asynchronous primitive.)
3185 *
3186 * Return: zero on success, else a negative error code.
3187 */
3188int spi_async(struct spi_device *spi, struct spi_message *message)
3189{
3190        struct spi_controller *ctlr = spi->controller;
3191        int ret;
3192        unsigned long flags;
3193
3194        ret = __spi_validate(spi, message);
3195        if (ret != 0)
3196                return ret;
3197
3198        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3199
3200        if (ctlr->bus_lock_flag)
3201                ret = -EBUSY;
3202        else
3203                ret = __spi_async(spi, message);
3204
3205        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3206
3207        return ret;
3208}
3209EXPORT_SYMBOL_GPL(spi_async);
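/*
 * Illustrative sketch (not part of this file): an asynchronous submission
 * with a completion callback.  The message and transfer must stay allocated
 * until the callback has run; "foo_done" and the "foo_priv" driver context
 * are hypothetical.
 *
 *	static void foo_done(void *context)
 *	{
 *		struct foo_priv *priv = context;
 *
 *		complete(&priv->xfer_done);	// may not sleep here
 *	}
 *
 *	spi_message_init(&priv->msg);
 *	spi_message_add_tail(&priv->xfer, &priv->msg);
 *	priv->msg.complete = foo_done;
 *	priv->msg.context = priv;
 *	ret = spi_async(spi, &priv->msg);
 */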
3210
3211/**
3212 * spi_async_locked - version of spi_async with exclusive bus usage
3213 * @spi: device with which data will be exchanged
3214 * @message: describes the data transfers, including completion callback
3215 * Context: any (irqs may be blocked, etc)
3216 *
3217 * This call may be used in_irq and other contexts which can't sleep,
3218 * as well as from task contexts which can sleep.
3219 *
3220 * The completion callback is invoked in a context which can't sleep.
3221 * Before that invocation, the value of message->status is undefined.
3222 * When the callback is issued, message->status holds either zero (to
3223 * indicate complete success) or a negative error code.  After that
3224 * callback returns, the driver which issued the transfer request may
3225 * deallocate the associated memory; it's no longer in use by any SPI
3226 * core or controller driver code.
3227 *
3228 * Note that although all messages to a spi_device are handled in
3229 * FIFO order, messages may go to different devices in other orders.
3230 * Some device might be higher priority, or have various "hard" access
3231 * time requirements, for example.
3232 *
3233 * On detection of any fault during the transfer, processing of
3234 * the entire message is aborted, and the device is deselected.
3235 * Until returning from the associated message completion callback,
3236 * no other spi_message queued to that device will be processed.
3237 * (This rule applies equally to all the synchronous transfer calls,
3238 * which are wrappers around this core asynchronous primitive.)
3239 *
3240 * Return: zero on success, else a negative error code.
3241 */
3242int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3243{
3244        struct spi_controller *ctlr = spi->controller;
3245        int ret;
3246        unsigned long flags;
3247
3248        ret = __spi_validate(spi, message);
3249        if (ret != 0)
3250                return ret;
3251
3252        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3253
3254        ret = __spi_async(spi, message);
3255
3256        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3257
3258        return ret;
3259}
3261EXPORT_SYMBOL_GPL(spi_async_locked);
3262
3263/*-------------------------------------------------------------------------*/
3264
3265/* Utility methods for SPI protocol drivers, layered on
3266 * top of the core.  Some other utility methods are defined as
3267 * inline functions.
3268 */
3269
3270static void spi_complete(void *arg)
3271{
3272        complete(arg);
3273}
3274
3275static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3276{
3277        DECLARE_COMPLETION_ONSTACK(done);
3278        int status;
3279        struct spi_controller *ctlr = spi->controller;
3280        unsigned long flags;
3281
3282        status = __spi_validate(spi, message);
3283        if (status != 0)
3284                return status;
3285
3286        message->complete = spi_complete;
3287        message->context = &done;
3288        message->spi = spi;
3289
3290        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3291        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3292
3293        /* If we're not using the legacy transfer method then we will
3294         * try to transfer in the calling context, so special-case that.
3295         * This code would be less tricky if we could remove the
3296         * support for driver implemented message queues.
3297         */
3298        if (ctlr->transfer == spi_queued_transfer) {
3299                spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3300
3301                trace_spi_message_submit(message);
3302
3303                status = __spi_queued_transfer(spi, message, false);
3304
3305                spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3306        } else {
3307                status = spi_async_locked(spi, message);
3308        }
3309
3310        if (status == 0) {
3311                /* Push out the messages in the calling context if we
3312                 * can.
3313                 */
3314                if (ctlr->transfer == spi_queued_transfer) {
3315                        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3316                                                       spi_sync_immediate);
3317                        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3318                                                       spi_sync_immediate);
3319                        __spi_pump_messages(ctlr, false);
3320                }
3321
3322                wait_for_completion(&done);
3323                status = message->status;
3324        }
3325        message->context = NULL;
3326        return status;
3327}
3328
3329/**
3330 * spi_sync - blocking/synchronous SPI data transfers
3331 * @spi: device with which data will be exchanged
3332 * @message: describes the data transfers
3333 * Context: can sleep
3334 *
3335 * This call may only be used from a context that may sleep.  The sleep
3336 * is non-interruptible, and has no timeout.  Low-overhead controller
3337 * drivers may DMA directly into and out of the message buffers.
3338 *
3339 * Note that the SPI device's chip select is active during the message,
3340 * and then is normally disabled between messages.  Drivers for some
3341 * frequently-used devices may want to minimize costs of selecting a chip,
3342 * by leaving it selected in anticipation that the next message will go
3343 * to the same chip.  (That may increase power usage.)
3344 *
3345 * Also, the caller is guaranteeing that the memory associated with the
3346 * message will not be freed before this call returns.
3347 *
3348 * Return: zero on success, else a negative error code.
3349 */
3350int spi_sync(struct spi_device *spi, struct spi_message *message)
3351{
3352        int ret;
3353
3354        mutex_lock(&spi->controller->bus_lock_mutex);
3355        ret = __spi_sync(spi, message);
3356        mutex_unlock(&spi->controller->bus_lock_mutex);
3357
3358        return ret;
3359}
3360EXPORT_SYMBOL_GPL(spi_sync);
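/*
 * Illustrative sketch (not part of this file): a blocking command/response
 * exchange built from two transfers.  "cmd" and "resp" stand in for
 * hypothetical DMA-safe (e.g. kmalloc'd) buffers owned by the caller, and
 * the lengths are arbitrary.
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd,  .len = 2 },	// command bytes
 *		{ .rx_buf = resp, .len = 4 },	// response bytes
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	ret = spi_sync(spi, &msg);
 */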
3361
3362/**
3363 * spi_sync_locked - version of spi_sync with exclusive bus usage
3364 * @spi: device with which data will be exchanged
3365 * @message: describes the data transfers
3366 * Context: can sleep
3367 *
3368 * This call may only be used from a context that may sleep.  The sleep
3369 * is non-interruptible, and has no timeout.  Low-overhead controller
3370 * drivers may DMA directly into and out of the message buffers.
3371 *
3372 * This call should be used by drivers that require exclusive access to the
3373 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3374 * be released by a spi_bus_unlock call when the exclusive access is over.
3375 *
3376 * Return: zero on success, else a negative error code.
3377 */
3378int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3379{
3380        return __spi_sync(spi, message);
3381}
3382EXPORT_SYMBOL_GPL(spi_sync_locked);
3383
3384/**
3385 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3386 * @ctlr: SPI bus master that should be locked for exclusive bus access
3387 * Context: can sleep
3388 *
3389 * This call may only be used from a context that may sleep.  The sleep
3390 * is non-interruptible, and has no timeout.
3391 *
3392 * This call should be used by drivers that require exclusive access to the
3393 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3394 * exclusive access is over. Data transfer must be done by spi_sync_locked
3395 * and spi_async_locked calls when the SPI bus lock is held.
3396 *
3397 * Return: always zero.
3398 */
3399int spi_bus_lock(struct spi_controller *ctlr)
3400{
3401        unsigned long flags;
3402
3403        mutex_lock(&ctlr->bus_lock_mutex);
3404
3405        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3406        ctlr->bus_lock_flag = 1;
3407        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3408
3409        /* mutex remains locked until spi_bus_unlock is called */
3410
3411        return 0;
3412}
3413EXPORT_SYMBOL_GPL(spi_bus_lock);
3414
3415/**
3416 * spi_bus_unlock - release the lock for exclusive SPI bus usage
3417 * @ctlr: SPI bus master that was locked for exclusive bus access
3418 * Context: can sleep
3419 *
3420 * This call may only be used from a context that may sleep.  The sleep
3421 * is non-interruptible, and has no timeout.
3422 *
3423 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
3424 * call.
3425 *
3426 * Return: always zero.
3427 */
3428int spi_bus_unlock(struct spi_controller *ctlr)
3429{
3430        ctlr->bus_lock_flag = 0;
3431
3432        mutex_unlock(&ctlr->bus_lock_mutex);
3433
3434        return 0;
3435}
3436EXPORT_SYMBOL_GPL(spi_bus_unlock);
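/*
 * Illustrative sketch (not part of this file): a driver that must issue two
 * messages back to back with no other bus traffic in between brackets them
 * with the bus lock and uses the _locked transfer calls.  "first_msg" and
 * "second_msg" are hypothetical.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &first_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->controller);
 *
 * Other devices on the same bus block in spi_sync() until the lock is
 * released.
 */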
3437
3438/* portable code must never pass more than 32 bytes */
3439#define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
3440
3441static u8       *buf;
3442
3443/**
3444 * spi_write_then_read - SPI synchronous write followed by read
3445 * @spi: device with which data will be exchanged
3446 * @txbuf: data to be written (need not be dma-safe)
3447 * @n_tx: size of txbuf, in bytes
3448 * @rxbuf: buffer into which data will be read (need not be dma-safe)
3449 * @n_rx: size of rxbuf, in bytes
3450 * Context: can sleep
3451 *
3452 * This performs a half duplex MicroWire style transaction with the
3453 * device, sending txbuf and then reading rxbuf.  The return value
3454 * is zero for success, else a negative errno status code.
3455 * This call may only be used from a context that may sleep.
3456 *
3457 * Parameters to this routine are always copied using a small buffer;
3458 * portable code should never use this for more than 32 bytes.
3459 * Performance-sensitive or bulk transfer code should instead use
3460 * spi_{async,sync}() calls with dma-safe buffers.
3461 *
3462 * Return: zero on success, else a negative error code.
3463 */
3464int spi_write_then_read(struct spi_device *spi,
3465                const void *txbuf, unsigned n_tx,
3466                void *rxbuf, unsigned n_rx)
3467{
3468        static DEFINE_MUTEX(lock);
3469
3470        int                     status;
3471        struct spi_message      message;
3472        struct spi_transfer     x[2];
3473        u8                      *local_buf;
3474
3475        /* Use preallocated DMA-safe buffer if we can.  We can't avoid
3476         * copying here (purely as a convenience), but we can
3477         * keep heap costs out of the hot path unless someone else is
3478         * using the pre-allocated buffer or the transfer is too large.
3479         */
3480        if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3481                local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3482                                    GFP_KERNEL | GFP_DMA);
3483                if (!local_buf)
3484                        return -ENOMEM;
3485        } else {
3486                local_buf = buf;
3487        }
3488
3489        spi_message_init(&message);
3490        memset(x, 0, sizeof(x));
3491        if (n_tx) {
3492                x[0].len = n_tx;
3493                spi_message_add_tail(&x[0], &message);
3494        }
3495        if (n_rx) {
3496                x[1].len = n_rx;
3497                spi_message_add_tail(&x[1], &message);
3498        }
3499
3500        memcpy(local_buf, txbuf, n_tx);
3501        x[0].tx_buf = local_buf;
3502        x[1].rx_buf = local_buf + n_tx;
3503
3504        /* Do the I/O */
3505        status = spi_sync(spi, &message);
3506        if (status == 0)
3507                memcpy(rxbuf, x[1].rx_buf, n_rx);
3508
3509        if (x[0].tx_buf == buf)
3510                mutex_unlock(&lock);
3511        else
3512                kfree(local_buf);
3513
3514        return status;
3515}
3516EXPORT_SYMBOL_GPL(spi_write_then_read);
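/*
 * Illustrative sketch (not part of this file): a one-byte register read,
 * the classic use of this helper.  The FOO_READ_REG opcode and "reg" are
 * hypothetical; as documented above, neither buffer needs to be DMA-safe.
 *
 *	u8 cmd = FOO_READ_REG | reg;
 *	u8 val;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *	if (ret)
 *		return ret;
 */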
3517
3518/*-------------------------------------------------------------------------*/
3519
3520#if IS_ENABLED(CONFIG_OF)
3521/* Must call put_device() when done with the returned spi_device */
3522struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3523{
3524        struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
3525
3526        return dev ? to_spi_device(dev) : NULL;
3527}
3528EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
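/*
 * Illustrative sketch (not part of this file): looking up the spi_device
 * behind a device tree node; the caller owns the reference, as noted above.
 * "np" is a hypothetical struct device_node pointer.
 *
 *	struct spi_device *spi = of_find_spi_device_by_node(np);
 *
 *	if (spi) {
 *		// ... use the device ...
 *		put_device(&spi->dev);
 *	}
 */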
3529#endif /* IS_ENABLED(CONFIG_OF) */
3530
3531#if IS_ENABLED(CONFIG_OF_DYNAMIC)
3532/* The SPI controllers are not using spi_bus, so we find them a different way */
3533static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3534{
3535        struct device *dev;
3536
3537        dev = class_find_device_by_of_node(&spi_master_class, node);
3538        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3539                dev = class_find_device_by_of_node(&spi_slave_class, node);
3540        if (!dev)
3541                return NULL;
3542
3543        /* Reference obtained in class_find_device() */
3544        return container_of(dev, struct spi_controller, dev);
3545}
3546
3547static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3548                         void *arg)
3549{
3550        struct of_reconfig_data *rd = arg;
3551        struct spi_controller *ctlr;
3552        struct spi_device *spi;
3553
3554        switch (of_reconfig_get_state_change(action, arg)) {
3555        case OF_RECONFIG_CHANGE_ADD:
3556                ctlr = of_find_spi_controller_by_node(rd->dn->parent);
3557                if (ctlr == NULL)
3558                        return NOTIFY_OK;       /* not for us */
3559
3560                if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3561                        put_device(&ctlr->dev);
3562                        return NOTIFY_OK;
3563                }
3564
3565                spi = of_register_spi_device(ctlr, rd->dn);
3566                put_device(&ctlr->dev);
3567
3568                if (IS_ERR(spi)) {
3569                        pr_err("%s: failed to create for '%pOF'\n",
3570                                        __func__, rd->dn);
3571                        of_node_clear_flag(rd->dn, OF_POPULATED);
3572                        return notifier_from_errno(PTR_ERR(spi));
3573                }
3574                break;
3575
3576        case OF_RECONFIG_CHANGE_REMOVE:
3577                /* already depopulated? */
3578                if (!of_node_check_flag(rd->dn, OF_POPULATED))
3579                        return NOTIFY_OK;
3580
3581                /* find our device by node */
3582                spi = of_find_spi_device_by_node(rd->dn);
3583                if (spi == NULL)
3584                        return NOTIFY_OK;       /* no? not meant for us */
3585
3586                /* unregister takes one ref away */
3587                spi_unregister_device(spi);
3588
3589                /* and put the reference of the find */
3590                put_device(&spi->dev);
3591                break;
3592        }
3593
3594        return NOTIFY_OK;
3595}
3596
3597static struct notifier_block spi_of_notifier = {
3598        .notifier_call = of_spi_notify,
3599};
3600#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3601extern struct notifier_block spi_of_notifier;
3602#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3603
3604#if IS_ENABLED(CONFIG_ACPI)
3605static int spi_acpi_controller_match(struct device *dev, const void *data)
3606{
3607        return ACPI_COMPANION(dev->parent) == data;
3608}
3609
3610static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3611{
3612        struct device *dev;
3613
3614        dev = class_find_device(&spi_master_class, NULL, adev,
3615                                spi_acpi_controller_match);
3616        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3617                dev = class_find_device(&spi_slave_class, NULL, adev,
3618                                        spi_acpi_controller_match);
3619        if (!dev)
3620                return NULL;
3621
3622        return container_of(dev, struct spi_controller, dev);
3623}
3624
3625static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3626{
3627        struct device *dev;
3628
3629        dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
3630        return dev ? to_spi_device(dev) : NULL;
3631}
3632
3633static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3634                           void *arg)
3635{
3636        struct acpi_device *adev = arg;
3637        struct spi_controller *ctlr;
3638        struct spi_device *spi;
3639
3640        switch (value) {
3641        case ACPI_RECONFIG_DEVICE_ADD:
3642                ctlr = acpi_spi_find_controller_by_adev(adev->parent);
3643                if (!ctlr)
3644                        break;
3645
3646                acpi_register_spi_device(ctlr, adev);
3647                put_device(&ctlr->dev);
3648                break;
3649        case ACPI_RECONFIG_DEVICE_REMOVE:
3650                if (!acpi_device_enumerated(adev))
3651                        break;
3652
3653                spi = acpi_spi_find_device_by_adev(adev);
3654                if (!spi)
3655                        break;
3656
3657                spi_unregister_device(spi);
3658                put_device(&spi->dev);
3659                break;
3660        }
3661
3662        return NOTIFY_OK;
3663}
3664
3665static struct notifier_block spi_acpi_notifier = {
3666        .notifier_call = acpi_spi_notify,
3667};
3668#else
3669extern struct notifier_block spi_acpi_notifier;
3670#endif
3671
3672static int __init spi_init(void)
3673{
3674        int     status;
3675
3676        buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3677        if (!buf) {
3678                status = -ENOMEM;
3679                goto err0;
3680        }
3681
3682        status = bus_register(&spi_bus_type);
3683        if (status < 0)
3684                goto err1;
3685
3686        status = class_register(&spi_master_class);
3687        if (status < 0)
3688                goto err2;
3689
3690        if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3691                status = class_register(&spi_slave_class);
3692                if (status < 0)
3693                        goto err3;
3694        }
3695
3696        if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3697                WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3698        if (IS_ENABLED(CONFIG_ACPI))
3699                WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3700
3701        return 0;
3702
3703err3:
3704        class_unregister(&spi_master_class);
3705err2:
3706        bus_unregister(&spi_bus_type);
3707err1:
3708        kfree(buf);
3709        buf = NULL;
3710err0:
3711        return status;
3712}
3713
3714/* board_info is normally registered in arch_initcall(),
3715 * but even essential drivers wait till later.
3716 *
3717 * REVISIT: only boardinfo really needs static linking. The rest (device and
3718 * driver registration) _could_ be dynamically linked (modular) ... costs
3719 * include needing to have boardinfo data structures be much more public.
3720 */
3721postcore_initcall(spi_init);
3722
3723