linux/drivers/spi/spi.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2// SPI init/core code
   3//
   4// Copyright (C) 2005 David Brownell
   5// Copyright (C) 2008 Secret Lab Technologies Ltd.
   6
   7#include <linux/kernel.h>
   8#include <linux/device.h>
   9#include <linux/init.h>
  10#include <linux/cache.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/dmaengine.h>
  13#include <linux/mutex.h>
  14#include <linux/of_device.h>
  15#include <linux/of_irq.h>
  16#include <linux/clk/clk-conf.h>
  17#include <linux/slab.h>
  18#include <linux/mod_devicetable.h>
  19#include <linux/spi/spi.h>
  20#include <linux/spi/spi-mem.h>
  21#include <linux/of_gpio.h>
  22#include <linux/gpio/consumer.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/pm_domain.h>
  25#include <linux/property.h>
  26#include <linux/export.h>
  27#include <linux/sched/rt.h>
  28#include <uapi/linux/sched/types.h>
  29#include <linux/delay.h>
  30#include <linux/kthread.h>
  31#include <linux/ioport.h>
  32#include <linux/acpi.h>
  33#include <linux/highmem.h>
  34#include <linux/idr.h>
  35#include <linux/platform_data/x86/apple.h>
  36
  37#define CREATE_TRACE_POINTS
  38#include <trace/events/spi.h>
  39EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
  40EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
  41
  42#include "internals.h"
  43
  44static DEFINE_IDR(spi_master_idr);
  45
  46static void spidev_release(struct device *dev)
  47{
  48        struct spi_device       *spi = to_spi_device(dev);
  49
   50        /* spi controllers may clean up for released devices */
  51        if (spi->controller->cleanup)
  52                spi->controller->cleanup(spi);
  53
  54        spi_controller_put(spi->controller);
  55        kfree(spi->driver_override);
  56        kfree(spi);
  57}
  58
  59static ssize_t
  60modalias_show(struct device *dev, struct device_attribute *a, char *buf)
  61{
  62        const struct spi_device *spi = to_spi_device(dev);
  63        int len;
  64
  65        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
  66        if (len != -ENODEV)
  67                return len;
  68
  69        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
  70}
  71static DEVICE_ATTR_RO(modalias);
  72
  73static ssize_t driver_override_store(struct device *dev,
  74                                     struct device_attribute *a,
  75                                     const char *buf, size_t count)
  76{
  77        struct spi_device *spi = to_spi_device(dev);
  78        const char *end = memchr(buf, '\n', count);
  79        const size_t len = end ? end - buf : count;
  80        const char *driver_override, *old;
  81
  82        /* We need to keep extra room for a newline when displaying value */
  83        if (len >= (PAGE_SIZE - 1))
  84                return -EINVAL;
  85
  86        driver_override = kstrndup(buf, len, GFP_KERNEL);
  87        if (!driver_override)
  88                return -ENOMEM;
  89
  90        device_lock(dev);
  91        old = spi->driver_override;
  92        if (len) {
  93                spi->driver_override = driver_override;
  94        } else {
   95                /* Empty string, disable driver override */
  96                spi->driver_override = NULL;
  97                kfree(driver_override);
  98        }
  99        device_unlock(dev);
 100        kfree(old);
 101
 102        return count;
 103}
 104
 105static ssize_t driver_override_show(struct device *dev,
 106                                    struct device_attribute *a, char *buf)
 107{
 108        const struct spi_device *spi = to_spi_device(dev);
 109        ssize_t len;
 110
 111        device_lock(dev);
 112        len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
 113        device_unlock(dev);
 114        return len;
 115}
 116static DEVICE_ATTR_RW(driver_override);
 117
 118#define SPI_STATISTICS_ATTRS(field, file)                               \
 119static ssize_t spi_controller_##field##_show(struct device *dev,        \
 120                                             struct device_attribute *attr, \
 121                                             char *buf)                 \
 122{                                                                       \
 123        struct spi_controller *ctlr = container_of(dev,                 \
 124                                         struct spi_controller, dev);   \
 125        return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
 126}                                                                       \
 127static struct device_attribute dev_attr_spi_controller_##field = {      \
 128        .attr = { .name = file, .mode = 0444 },                         \
 129        .show = spi_controller_##field##_show,                          \
 130};                                                                      \
 131static ssize_t spi_device_##field##_show(struct device *dev,            \
 132                                         struct device_attribute *attr, \
 133                                        char *buf)                      \
 134{                                                                       \
 135        struct spi_device *spi = to_spi_device(dev);                    \
 136        return spi_statistics_##field##_show(&spi->statistics, buf);    \
 137}                                                                       \
 138static struct device_attribute dev_attr_spi_device_##field = {          \
 139        .attr = { .name = file, .mode = 0444 },                         \
 140        .show = spi_device_##field##_show,                              \
 141}
 142
 143#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
 144static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
 145                                            char *buf)                  \
 146{                                                                       \
 147        unsigned long flags;                                            \
 148        ssize_t len;                                                    \
 149        spin_lock_irqsave(&stat->lock, flags);                          \
 150        len = sprintf(buf, format_string, stat->field);                 \
 151        spin_unlock_irqrestore(&stat->lock, flags);                     \
 152        return len;                                                     \
 153}                                                                       \
 154SPI_STATISTICS_ATTRS(name, file)
 155
 156#define SPI_STATISTICS_SHOW(field, format_string)                       \
 157        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
 158                                 field, format_string)
 159
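/*
 * Illustrative sketch (expansion written out by hand, not verbatim compiler
 * output): an invocation such as SPI_STATISTICS_SHOW(messages, "%lu")
 * generates a spi_statistics_messages_show() helper that prints
 * stat->messages under stat->lock, and SPI_STATISTICS_ATTRS(messages,
 * "messages") then provides the read-only "messages" sysfs attribute for
 * both the controller and the device:
 *
 *	static ssize_t spi_statistics_messages_show(struct spi_statistics *stat,
 *						    char *buf)
 *	{
 *		...
 *		len = sprintf(buf, "%lu", stat->messages);
 *		...
 *	}
 */
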
 160SPI_STATISTICS_SHOW(messages, "%lu");
 161SPI_STATISTICS_SHOW(transfers, "%lu");
 162SPI_STATISTICS_SHOW(errors, "%lu");
 163SPI_STATISTICS_SHOW(timedout, "%lu");
 164
 165SPI_STATISTICS_SHOW(spi_sync, "%lu");
 166SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
 167SPI_STATISTICS_SHOW(spi_async, "%lu");
 168
 169SPI_STATISTICS_SHOW(bytes, "%llu");
 170SPI_STATISTICS_SHOW(bytes_rx, "%llu");
 171SPI_STATISTICS_SHOW(bytes_tx, "%llu");
 172
 173#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
 174        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
 175                                 "transfer_bytes_histo_" number,        \
 176                                 transfer_bytes_histo[index],  "%lu")
 177SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
 178SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
 179SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
 180SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
 181SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
 182SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
 183SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
 184SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
 185SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
 186SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
 187SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
 188SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
 189SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
 190SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
 191SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
 192SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
 193SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
 194
 195SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
 196
 197static struct attribute *spi_dev_attrs[] = {
 198        &dev_attr_modalias.attr,
 199        &dev_attr_driver_override.attr,
 200        NULL,
 201};
 202
 203static const struct attribute_group spi_dev_group = {
 204        .attrs  = spi_dev_attrs,
 205};
 206
 207static struct attribute *spi_device_statistics_attrs[] = {
 208        &dev_attr_spi_device_messages.attr,
 209        &dev_attr_spi_device_transfers.attr,
 210        &dev_attr_spi_device_errors.attr,
 211        &dev_attr_spi_device_timedout.attr,
 212        &dev_attr_spi_device_spi_sync.attr,
 213        &dev_attr_spi_device_spi_sync_immediate.attr,
 214        &dev_attr_spi_device_spi_async.attr,
 215        &dev_attr_spi_device_bytes.attr,
 216        &dev_attr_spi_device_bytes_rx.attr,
 217        &dev_attr_spi_device_bytes_tx.attr,
 218        &dev_attr_spi_device_transfer_bytes_histo0.attr,
 219        &dev_attr_spi_device_transfer_bytes_histo1.attr,
 220        &dev_attr_spi_device_transfer_bytes_histo2.attr,
 221        &dev_attr_spi_device_transfer_bytes_histo3.attr,
 222        &dev_attr_spi_device_transfer_bytes_histo4.attr,
 223        &dev_attr_spi_device_transfer_bytes_histo5.attr,
 224        &dev_attr_spi_device_transfer_bytes_histo6.attr,
 225        &dev_attr_spi_device_transfer_bytes_histo7.attr,
 226        &dev_attr_spi_device_transfer_bytes_histo8.attr,
 227        &dev_attr_spi_device_transfer_bytes_histo9.attr,
 228        &dev_attr_spi_device_transfer_bytes_histo10.attr,
 229        &dev_attr_spi_device_transfer_bytes_histo11.attr,
 230        &dev_attr_spi_device_transfer_bytes_histo12.attr,
 231        &dev_attr_spi_device_transfer_bytes_histo13.attr,
 232        &dev_attr_spi_device_transfer_bytes_histo14.attr,
 233        &dev_attr_spi_device_transfer_bytes_histo15.attr,
 234        &dev_attr_spi_device_transfer_bytes_histo16.attr,
 235        &dev_attr_spi_device_transfers_split_maxsize.attr,
 236        NULL,
 237};
 238
 239static const struct attribute_group spi_device_statistics_group = {
 240        .name  = "statistics",
 241        .attrs  = spi_device_statistics_attrs,
 242};
 243
 244static const struct attribute_group *spi_dev_groups[] = {
 245        &spi_dev_group,
 246        &spi_device_statistics_group,
 247        NULL,
 248};
 249
 250static struct attribute *spi_controller_statistics_attrs[] = {
 251        &dev_attr_spi_controller_messages.attr,
 252        &dev_attr_spi_controller_transfers.attr,
 253        &dev_attr_spi_controller_errors.attr,
 254        &dev_attr_spi_controller_timedout.attr,
 255        &dev_attr_spi_controller_spi_sync.attr,
 256        &dev_attr_spi_controller_spi_sync_immediate.attr,
 257        &dev_attr_spi_controller_spi_async.attr,
 258        &dev_attr_spi_controller_bytes.attr,
 259        &dev_attr_spi_controller_bytes_rx.attr,
 260        &dev_attr_spi_controller_bytes_tx.attr,
 261        &dev_attr_spi_controller_transfer_bytes_histo0.attr,
 262        &dev_attr_spi_controller_transfer_bytes_histo1.attr,
 263        &dev_attr_spi_controller_transfer_bytes_histo2.attr,
 264        &dev_attr_spi_controller_transfer_bytes_histo3.attr,
 265        &dev_attr_spi_controller_transfer_bytes_histo4.attr,
 266        &dev_attr_spi_controller_transfer_bytes_histo5.attr,
 267        &dev_attr_spi_controller_transfer_bytes_histo6.attr,
 268        &dev_attr_spi_controller_transfer_bytes_histo7.attr,
 269        &dev_attr_spi_controller_transfer_bytes_histo8.attr,
 270        &dev_attr_spi_controller_transfer_bytes_histo9.attr,
 271        &dev_attr_spi_controller_transfer_bytes_histo10.attr,
 272        &dev_attr_spi_controller_transfer_bytes_histo11.attr,
 273        &dev_attr_spi_controller_transfer_bytes_histo12.attr,
 274        &dev_attr_spi_controller_transfer_bytes_histo13.attr,
 275        &dev_attr_spi_controller_transfer_bytes_histo14.attr,
 276        &dev_attr_spi_controller_transfer_bytes_histo15.attr,
 277        &dev_attr_spi_controller_transfer_bytes_histo16.attr,
 278        &dev_attr_spi_controller_transfers_split_maxsize.attr,
 279        NULL,
 280};
 281
 282static const struct attribute_group spi_controller_statistics_group = {
 283        .name  = "statistics",
 284        .attrs  = spi_controller_statistics_attrs,
 285};
 286
 287static const struct attribute_group *spi_master_groups[] = {
 288        &spi_controller_statistics_group,
 289        NULL,
 290};
 291
 292void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 293                                       struct spi_transfer *xfer,
 294                                       struct spi_controller *ctlr)
 295{
 296        unsigned long flags;
 297        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
 298
 299        if (l2len < 0)
 300                l2len = 0;
 301
 302        spin_lock_irqsave(&stats->lock, flags);
 303
 304        stats->transfers++;
 305        stats->transfer_bytes_histo[l2len]++;
 306
 307        stats->bytes += xfer->len;
 308        if ((xfer->tx_buf) &&
 309            (xfer->tx_buf != ctlr->dummy_tx))
 310                stats->bytes_tx += xfer->len;
 311        if ((xfer->rx_buf) &&
 312            (xfer->rx_buf != ctlr->dummy_rx))
 313                stats->bytes_rx += xfer->len;
 314
 315        spin_unlock_irqrestore(&stats->lock, flags);
 316}
 317EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
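
/*
 * Worked example (illustrative): for a transfer of xfer->len == 200 bytes,
 * fls(200) is 8, so l2len = min(8, SPI_STATISTICS_HISTO_SIZE) - 1 = 7 and
 * transfer_bytes_histo[7] (the "128-255" bucket) is incremented.  A
 * zero-length transfer yields fls(0) - 1 == -1, which is clamped to bucket 0.
 */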
 318
 319/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 320 * and the sysfs version makes coldplug work too.
 321 */
 322
 323static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
 324                                                const struct spi_device *sdev)
 325{
 326        while (id->name[0]) {
 327                if (!strcmp(sdev->modalias, id->name))
 328                        return id;
 329                id++;
 330        }
 331        return NULL;
 332}
 333
 334const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
 335{
 336        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
 337
 338        return spi_match_id(sdrv->id_table, sdev);
 339}
 340EXPORT_SYMBOL_GPL(spi_get_device_id);
 341
 342static int spi_match_device(struct device *dev, struct device_driver *drv)
 343{
 344        const struct spi_device *spi = to_spi_device(dev);
 345        const struct spi_driver *sdrv = to_spi_driver(drv);
 346
 347        /* Check override first, and if set, only use the named driver */
 348        if (spi->driver_override)
 349                return strcmp(spi->driver_override, drv->name) == 0;
 350
 351        /* Attempt an OF style match */
 352        if (of_driver_match_device(dev, drv))
 353                return 1;
 354
 355        /* Then try ACPI */
 356        if (acpi_driver_match_device(dev, drv))
 357                return 1;
 358
 359        if (sdrv->id_table)
 360                return !!spi_match_id(sdrv->id_table, spi);
 361
 362        return strcmp(spi->modalias, drv->name) == 0;
 363}
 364
 365static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
 366{
 367        const struct spi_device         *spi = to_spi_device(dev);
 368        int rc;
 369
 370        rc = acpi_device_uevent_modalias(dev, env);
 371        if (rc != -ENODEV)
 372                return rc;
 373
 374        return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
 375}
 376
 377struct bus_type spi_bus_type = {
 378        .name           = "spi",
 379        .dev_groups     = spi_dev_groups,
 380        .match          = spi_match_device,
 381        .uevent         = spi_uevent,
 382};
 383EXPORT_SYMBOL_GPL(spi_bus_type);
 384
 385
 386static int spi_drv_probe(struct device *dev)
 387{
 388        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 389        struct spi_device               *spi = to_spi_device(dev);
 390        int ret;
 391
 392        ret = of_clk_set_defaults(dev->of_node, false);
 393        if (ret)
 394                return ret;
 395
 396        if (dev->of_node) {
 397                spi->irq = of_irq_get(dev->of_node, 0);
 398                if (spi->irq == -EPROBE_DEFER)
 399                        return -EPROBE_DEFER;
 400                if (spi->irq < 0)
 401                        spi->irq = 0;
 402        }
 403
 404        ret = dev_pm_domain_attach(dev, true);
 405        if (ret)
 406                return ret;
 407
 408        ret = sdrv->probe(spi);
 409        if (ret)
 410                dev_pm_domain_detach(dev, true);
 411
 412        return ret;
 413}
 414
 415static int spi_drv_remove(struct device *dev)
 416{
 417        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 418        int ret;
 419
 420        ret = sdrv->remove(to_spi_device(dev));
 421        dev_pm_domain_detach(dev, true);
 422
 423        return ret;
 424}
 425
 426static void spi_drv_shutdown(struct device *dev)
 427{
 428        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 429
 430        sdrv->shutdown(to_spi_device(dev));
 431}
 432
 433/**
 434 * __spi_register_driver - register a SPI driver
 435 * @owner: owner module of the driver to register
 436 * @sdrv: the driver to register
 437 * Context: can sleep
 438 *
 439 * Return: zero on success, else a negative error code.
 440 */
 441int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
 442{
 443        sdrv->driver.owner = owner;
 444        sdrv->driver.bus = &spi_bus_type;
 445        if (sdrv->probe)
 446                sdrv->driver.probe = spi_drv_probe;
 447        if (sdrv->remove)
 448                sdrv->driver.remove = spi_drv_remove;
 449        if (sdrv->shutdown)
 450                sdrv->driver.shutdown = spi_drv_shutdown;
 451        return driver_register(&sdrv->driver);
 452}
 453EXPORT_SYMBOL_GPL(__spi_register_driver);
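
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * client drivers usually fill in a struct spi_driver and register it with
 * module_spi_driver(), which reaches this function via spi_register_driver():
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = { .name = "foo" },
 *		.probe  = foo_probe,
 *		.remove = foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */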
 454
 455/*-------------------------------------------------------------------------*/
 456
 457/* SPI devices should normally not be created by SPI device drivers; that
 458 * would make them board-specific.  Similarly with SPI controller drivers.
  459 * Device registration normally goes into a file like arch/.../mach.../board-YYY.c
  460 * with other read-only (flashable) information about mainboard devices.
 461 */
 462
 463struct boardinfo {
 464        struct list_head        list;
 465        struct spi_board_info   board_info;
 466};
 467
 468static LIST_HEAD(board_list);
 469static LIST_HEAD(spi_controller_list);
 470
 471/*
  472 * Used to protect add/del operations on the board_info list and the
  473 * spi_controller list, and their matching process.  It is also used
  474 * to protect objects of type struct idr.
 475 */
 476static DEFINE_MUTEX(board_lock);
 477
 478/**
 479 * spi_alloc_device - Allocate a new SPI device
 480 * @ctlr: Controller to which device is connected
 481 * Context: can sleep
 482 *
 483 * Allows a driver to allocate and initialize a spi_device without
 484 * registering it immediately.  This allows a driver to directly
 485 * fill the spi_device with device parameters before calling
 486 * spi_add_device() on it.
 487 *
 488 * Caller is responsible to call spi_add_device() on the returned
 489 * spi_device structure to add it to the SPI controller.  If the caller
 490 * needs to discard the spi_device without adding it, then it should
 491 * call spi_dev_put() on it.
 492 *
 493 * Return: a pointer to the new device, or NULL.
 494 */
 495struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
 496{
 497        struct spi_device       *spi;
 498
 499        if (!spi_controller_get(ctlr))
 500                return NULL;
 501
 502        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
 503        if (!spi) {
 504                spi_controller_put(ctlr);
 505                return NULL;
 506        }
 507
 508        spi->master = spi->controller = ctlr;
 509        spi->dev.parent = &ctlr->dev;
 510        spi->dev.bus = &spi_bus_type;
 511        spi->dev.release = spidev_release;
 512        spi->cs_gpio = -ENOENT;
 513
 514        spin_lock_init(&spi->statistics.lock);
 515
 516        device_initialize(&spi->dev);
 517        return spi;
 518}
 519EXPORT_SYMBOL_GPL(spi_alloc_device);
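
/*
 * Illustrative sketch (hypothetical values): the allocate/fill/add flow
 * described above, as an adapter or board-setup driver might use it:
 *
 *	struct spi_device *spi;
 *	int ret;
 *
 *	spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);	// discard the unregistered device
 *	return ret;
 */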
 520
 521static void spi_dev_set_name(struct spi_device *spi)
 522{
 523        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
 524
 525        if (adev) {
 526                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
 527                return;
 528        }
 529
 530        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
 531                     spi->chip_select);
 532}
 533
 534static int spi_dev_check(struct device *dev, void *data)
 535{
 536        struct spi_device *spi = to_spi_device(dev);
 537        struct spi_device *new_spi = data;
 538
 539        if (spi->controller == new_spi->controller &&
 540            spi->chip_select == new_spi->chip_select)
 541                return -EBUSY;
 542        return 0;
 543}
 544
 545/**
 546 * spi_add_device - Add spi_device allocated with spi_alloc_device
 547 * @spi: spi_device to register
 548 *
 549 * Companion function to spi_alloc_device.  Devices allocated with
 550 * spi_alloc_device can be added onto the spi bus with this function.
 551 *
 552 * Return: 0 on success; negative errno on failure
 553 */
 554int spi_add_device(struct spi_device *spi)
 555{
 556        static DEFINE_MUTEX(spi_add_lock);
 557        struct spi_controller *ctlr = spi->controller;
 558        struct device *dev = ctlr->dev.parent;
 559        int status;
 560
 561        /* Chipselects are numbered 0..max; validate. */
 562        if (spi->chip_select >= ctlr->num_chipselect) {
 563                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
 564                        ctlr->num_chipselect);
 565                return -EINVAL;
 566        }
 567
 568        /* Set the bus ID string */
 569        spi_dev_set_name(spi);
 570
 571        /* We need to make sure there's no other device with this
 572         * chipselect **BEFORE** we call setup(), else we'll trash
 573         * its configuration.  Lock against concurrent add() calls.
 574         */
 575        mutex_lock(&spi_add_lock);
 576
 577        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
 578        if (status) {
 579                dev_err(dev, "chipselect %d already in use\n",
 580                                spi->chip_select);
 581                goto done;
 582        }
 583
 584        /* Descriptors take precedence */
 585        if (ctlr->cs_gpiods)
 586                spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
 587        else if (ctlr->cs_gpios)
 588                spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
 589
 590        /* Drivers may modify this initial i/o setup, but will
  591         * normally rely on the device having been set up.  Devices
 592         * using SPI_CS_HIGH can't coexist well otherwise...
 593         */
 594        status = spi_setup(spi);
 595        if (status < 0) {
 596                dev_err(dev, "can't setup %s, status %d\n",
 597                                dev_name(&spi->dev), status);
 598                goto done;
 599        }
 600
 601        /* Device may be bound to an active driver when this returns */
 602        status = device_add(&spi->dev);
 603        if (status < 0)
 604                dev_err(dev, "can't add %s, status %d\n",
 605                                dev_name(&spi->dev), status);
 606        else
 607                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
 608
 609done:
 610        mutex_unlock(&spi_add_lock);
 611        return status;
 612}
 613EXPORT_SYMBOL_GPL(spi_add_device);
 614
 615/**
 616 * spi_new_device - instantiate one new SPI device
 617 * @ctlr: Controller to which device is connected
 618 * @chip: Describes the SPI device
 619 * Context: can sleep
 620 *
 621 * On typical mainboards, this is purely internal; and it's not needed
 622 * after board init creates the hard-wired devices.  Some development
 623 * platforms may not be able to use spi_register_board_info though, and
 624 * this is exported so that for example a USB or parport based adapter
 625 * driver could add devices (which it would learn about out-of-band).
 626 *
 627 * Return: the new device, or NULL.
 628 */
 629struct spi_device *spi_new_device(struct spi_controller *ctlr,
 630                                  struct spi_board_info *chip)
 631{
 632        struct spi_device       *proxy;
 633        int                     status;
 634
 635        /* NOTE:  caller did any chip->bus_num checks necessary.
 636         *
 637         * Also, unless we change the return value convention to use
 638         * error-or-pointer (not NULL-or-pointer), troubleshootability
 639         * suggests syslogged diagnostics are best here (ugh).
 640         */
 641
 642        proxy = spi_alloc_device(ctlr);
 643        if (!proxy)
 644                return NULL;
 645
 646        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
 647
 648        proxy->chip_select = chip->chip_select;
 649        proxy->max_speed_hz = chip->max_speed_hz;
 650        proxy->mode = chip->mode;
 651        proxy->irq = chip->irq;
 652        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
 653        proxy->dev.platform_data = (void *) chip->platform_data;
 654        proxy->controller_data = chip->controller_data;
 655        proxy->controller_state = NULL;
 656
 657        if (chip->properties) {
 658                status = device_add_properties(&proxy->dev, chip->properties);
 659                if (status) {
 660                        dev_err(&ctlr->dev,
 661                                "failed to add properties to '%s': %d\n",
 662                                chip->modalias, status);
 663                        goto err_dev_put;
 664                }
 665        }
 666
 667        status = spi_add_device(proxy);
 668        if (status < 0)
 669                goto err_remove_props;
 670
 671        return proxy;
 672
 673err_remove_props:
 674        if (chip->properties)
 675                device_remove_properties(&proxy->dev);
 676err_dev_put:
 677        spi_dev_put(proxy);
 678        return NULL;
 679}
 680EXPORT_SYMBOL_GPL(spi_new_device);
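
/*
 * Illustrative sketch (hypothetical chip description): an adapter driver
 * that learns about a downstream device out-of-band could instantiate it
 * like this:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 500000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *dev;
 *
 *	dev = spi_new_device(ctlr, &chip);
 *	if (!dev)
 *		dev_err(&ctlr->dev, "cannot instantiate device\n");
 */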
 681
 682/**
 683 * spi_unregister_device - unregister a single SPI device
 684 * @spi: spi_device to unregister
 685 *
 686 * Start making the passed SPI device vanish. Normally this would be handled
 687 * by spi_unregister_controller().
 688 */
 689void spi_unregister_device(struct spi_device *spi)
 690{
 691        if (!spi)
 692                return;
 693
 694        if (spi->dev.of_node) {
 695                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
 696                of_node_put(spi->dev.of_node);
 697        }
 698        if (ACPI_COMPANION(&spi->dev))
 699                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
 700        device_unregister(&spi->dev);
 701}
 702EXPORT_SYMBOL_GPL(spi_unregister_device);
 703
 704static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
 705                                              struct spi_board_info *bi)
 706{
 707        struct spi_device *dev;
 708
 709        if (ctlr->bus_num != bi->bus_num)
 710                return;
 711
 712        dev = spi_new_device(ctlr, bi);
 713        if (!dev)
 714                dev_err(ctlr->dev.parent, "can't create new device for %s\n",
 715                        bi->modalias);
 716}
 717
 718/**
 719 * spi_register_board_info - register SPI devices for a given board
 720 * @info: array of chip descriptors
 721 * @n: how many descriptors are provided
 722 * Context: can sleep
 723 *
 724 * Board-specific early init code calls this (probably during arch_initcall)
 725 * with segments of the SPI device table.  Any device nodes are created later,
 726 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 727 * this table of devices forever, so that reloading a controller driver will
 728 * not make Linux forget about these hard-wired devices.
 729 *
 730 * Other code can also call this, e.g. a particular add-on board might provide
 731 * SPI devices through its expansion connector, so code initializing that board
 732 * would naturally declare its SPI devices.
 733 *
 734 * The board info passed can safely be __initdata ... but be careful of
  735 * any embedded pointers (platform_data, etc); they're copied as-is.
 736 * Device properties are deep-copied though.
 737 *
 738 * Return: zero on success, else a negative error code.
 739 */
 740int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 741{
 742        struct boardinfo *bi;
 743        int i;
 744
 745        if (!n)
 746                return 0;
 747
 748        bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
 749        if (!bi)
 750                return -ENOMEM;
 751
 752        for (i = 0; i < n; i++, bi++, info++) {
 753                struct spi_controller *ctlr;
 754
 755                memcpy(&bi->board_info, info, sizeof(*info));
 756                if (info->properties) {
 757                        bi->board_info.properties =
 758                                        property_entries_dup(info->properties);
 759                        if (IS_ERR(bi->board_info.properties))
 760                                return PTR_ERR(bi->board_info.properties);
 761                }
 762
 763                mutex_lock(&board_lock);
 764                list_add_tail(&bi->list, &board_list);
 765                list_for_each_entry(ctlr, &spi_controller_list, list)
 766                        spi_match_controller_to_boardinfo(ctlr,
 767                                                          &bi->board_info);
 768                mutex_unlock(&board_lock);
 769        }
 770
 771        return 0;
 772}
 773
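/*
 * Illustrative sketch (hypothetical board code): a board file typically
 * passes a static, __initdata table from its early init code:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 10000000,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */
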
 774/*-------------------------------------------------------------------------*/
 775
 776static void spi_set_cs(struct spi_device *spi, bool enable)
 777{
 778        if (spi->mode & SPI_CS_HIGH)
 779                enable = !enable;
 780
 781        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
 782                /*
 783                 * Honour the SPI_NO_CS flag and invert the enable line, as
 784                 * active low is default for SPI. Execution paths that handle
 785                 * polarity inversion in gpiolib (such as device tree) will
  786                 * enforce active high using SPI_CS_HIGH, resulting in a
 787                 * double inversion through the code above.
 788                 */
 789                if (!(spi->mode & SPI_NO_CS)) {
 790                        if (spi->cs_gpiod)
 791                                gpiod_set_value_cansleep(spi->cs_gpiod,
 792                                                         !enable);
 793                        else
 794                                gpio_set_value_cansleep(spi->cs_gpio, !enable);
 795                }
 796                /* Some SPI masters need both GPIO CS & slave_select */
 797                if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
 798                    spi->controller->set_cs)
 799                        spi->controller->set_cs(spi, !enable);
 800        } else if (spi->controller->set_cs) {
 801                spi->controller->set_cs(spi, !enable);
 802        }
 803}
 804
 805#ifdef CONFIG_HAS_DMA
 806int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
 807                struct sg_table *sgt, void *buf, size_t len,
 808                enum dma_data_direction dir)
 809{
 810        const bool vmalloced_buf = is_vmalloc_addr(buf);
 811        unsigned int max_seg_size = dma_get_max_seg_size(dev);
 812#ifdef CONFIG_HIGHMEM
 813        const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
 814                                (unsigned long)buf < (PKMAP_BASE +
 815                                        (LAST_PKMAP * PAGE_SIZE)));
 816#else
 817        const bool kmap_buf = false;
 818#endif
 819        int desc_len;
 820        int sgs;
 821        struct page *vm_page;
 822        struct scatterlist *sg;
 823        void *sg_buf;
 824        size_t min;
 825        int i, ret;
 826
 827        if (vmalloced_buf || kmap_buf) {
 828                desc_len = min_t(int, max_seg_size, PAGE_SIZE);
 829                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
 830        } else if (virt_addr_valid(buf)) {
 831                desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
 832                sgs = DIV_ROUND_UP(len, desc_len);
 833        } else {
 834                return -EINVAL;
 835        }
 836
 837        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
 838        if (ret != 0)
 839                return ret;
 840
 841        sg = &sgt->sgl[0];
 842        for (i = 0; i < sgs; i++) {
 843
 844                if (vmalloced_buf || kmap_buf) {
 845                        /*
 846                         * Next scatterlist entry size is the minimum between
 847                         * the desc_len and the remaining buffer length that
 848                         * fits in a page.
 849                         */
 850                        min = min_t(size_t, desc_len,
 851                                    min_t(size_t, len,
 852                                          PAGE_SIZE - offset_in_page(buf)));
 853                        if (vmalloced_buf)
 854                                vm_page = vmalloc_to_page(buf);
 855                        else
 856                                vm_page = kmap_to_page(buf);
 857                        if (!vm_page) {
 858                                sg_free_table(sgt);
 859                                return -ENOMEM;
 860                        }
 861                        sg_set_page(sg, vm_page,
 862                                    min, offset_in_page(buf));
 863                } else {
 864                        min = min_t(size_t, len, desc_len);
 865                        sg_buf = buf;
 866                        sg_set_buf(sg, sg_buf, min);
 867                }
 868
 869                buf += min;
 870                len -= min;
 871                sg = sg_next(sg);
 872        }
 873
 874        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
 875        if (!ret)
 876                ret = -ENOMEM;
 877        if (ret < 0) {
 878                sg_free_table(sgt);
 879                return ret;
 880        }
 881
 882        sgt->nents = ret;
 883
 884        return 0;
 885}
 886
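/*
 * Worked example (illustrative, assuming 4 KiB pages and a large DMA
 * max_seg_size): mapping a 10000-byte vmalloc'ed buffer that starts 100
 * bytes into a page gives desc_len = PAGE_SIZE = 4096 and
 * sgs = DIV_ROUND_UP(10000 + 100, 4096) = 3, i.e. three scatterlist
 * entries of 3996, 4096 and 1908 bytes.
 */
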
 887void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
 888                   struct sg_table *sgt, enum dma_data_direction dir)
 889{
 890        if (sgt->orig_nents) {
 891                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
 892                sg_free_table(sgt);
 893        }
 894}
 895
 896static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 897{
 898        struct device *tx_dev, *rx_dev;
 899        struct spi_transfer *xfer;
 900        int ret;
 901
 902        if (!ctlr->can_dma)
 903                return 0;
 904
 905        if (ctlr->dma_tx)
 906                tx_dev = ctlr->dma_tx->device->dev;
 907        else
 908                tx_dev = ctlr->dev.parent;
 909
 910        if (ctlr->dma_rx)
 911                rx_dev = ctlr->dma_rx->device->dev;
 912        else
 913                rx_dev = ctlr->dev.parent;
 914
 915        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 916                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 917                        continue;
 918
 919                if (xfer->tx_buf != NULL) {
 920                        ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
 921                                          (void *)xfer->tx_buf, xfer->len,
 922                                          DMA_TO_DEVICE);
 923                        if (ret != 0)
 924                                return ret;
 925                }
 926
 927                if (xfer->rx_buf != NULL) {
 928                        ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
 929                                          xfer->rx_buf, xfer->len,
 930                                          DMA_FROM_DEVICE);
 931                        if (ret != 0) {
 932                                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
 933                                              DMA_TO_DEVICE);
 934                                return ret;
 935                        }
 936                }
 937        }
 938
 939        ctlr->cur_msg_mapped = true;
 940
 941        return 0;
 942}
 943
 944static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 945{
 946        struct spi_transfer *xfer;
 947        struct device *tx_dev, *rx_dev;
 948
 949        if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
 950                return 0;
 951
 952        if (ctlr->dma_tx)
 953                tx_dev = ctlr->dma_tx->device->dev;
 954        else
 955                tx_dev = ctlr->dev.parent;
 956
 957        if (ctlr->dma_rx)
 958                rx_dev = ctlr->dma_rx->device->dev;
 959        else
 960                rx_dev = ctlr->dev.parent;
 961
 962        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 963                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 964                        continue;
 965
 966                spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 967                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 968        }
 969
 970        return 0;
 971}
 972#else /* !CONFIG_HAS_DMA */
 973static inline int __spi_map_msg(struct spi_controller *ctlr,
 974                                struct spi_message *msg)
 975{
 976        return 0;
 977}
 978
 979static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 980                                  struct spi_message *msg)
 981{
 982        return 0;
 983}
 984#endif /* !CONFIG_HAS_DMA */
 985
 986static inline int spi_unmap_msg(struct spi_controller *ctlr,
 987                                struct spi_message *msg)
 988{
 989        struct spi_transfer *xfer;
 990
 991        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 992                /*
  993                 * Restore tx_buf and rx_buf to their original NULL values if
  994                 * they were replaced with the controller's dummy buffers.
 995                 */
 996                if (xfer->tx_buf == ctlr->dummy_tx)
 997                        xfer->tx_buf = NULL;
 998                if (xfer->rx_buf == ctlr->dummy_rx)
 999                        xfer->rx_buf = NULL;
1000        }
1001
1002        return __spi_unmap_msg(ctlr, msg);
1003}
1004
1005static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1006{
1007        struct spi_transfer *xfer;
1008        void *tmp;
1009        unsigned int max_tx, max_rx;
1010
1011        if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
1012                max_tx = 0;
1013                max_rx = 0;
1014
1015                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1016                        if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1017                            !xfer->tx_buf)
1018                                max_tx = max(xfer->len, max_tx);
1019                        if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1020                            !xfer->rx_buf)
1021                                max_rx = max(xfer->len, max_rx);
1022                }
1023
1024                if (max_tx) {
1025                        tmp = krealloc(ctlr->dummy_tx, max_tx,
1026                                       GFP_KERNEL | GFP_DMA);
1027                        if (!tmp)
1028                                return -ENOMEM;
1029                        ctlr->dummy_tx = tmp;
1030                        memset(tmp, 0, max_tx);
1031                }
1032
1033                if (max_rx) {
1034                        tmp = krealloc(ctlr->dummy_rx, max_rx,
1035                                       GFP_KERNEL | GFP_DMA);
1036                        if (!tmp)
1037                                return -ENOMEM;
1038                        ctlr->dummy_rx = tmp;
1039                }
1040
1041                if (max_tx || max_rx) {
1042                        list_for_each_entry(xfer, &msg->transfers,
1043                                            transfer_list) {
1044                                if (!xfer->len)
1045                                        continue;
1046                                if (!xfer->tx_buf)
1047                                        xfer->tx_buf = ctlr->dummy_tx;
1048                                if (!xfer->rx_buf)
1049                                        xfer->rx_buf = ctlr->dummy_rx;
1050                        }
1051                }
1052        }
1053
1054        return __spi_map_msg(ctlr, msg);
1055}
1056
1057static int spi_transfer_wait(struct spi_controller *ctlr,
1058                             struct spi_message *msg,
1059                             struct spi_transfer *xfer)
1060{
1061        struct spi_statistics *statm = &ctlr->statistics;
1062        struct spi_statistics *stats = &msg->spi->statistics;
1063        unsigned long long ms = 1;
1064
1065        if (spi_controller_is_slave(ctlr)) {
1066                if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1067                        dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1068                        return -EINTR;
1069                }
1070        } else {
1071                ms = 8LL * 1000LL * xfer->len;
1072                do_div(ms, xfer->speed_hz);
1073                ms += ms + 200; /* some tolerance */
1074
1075                if (ms > UINT_MAX)
1076                        ms = UINT_MAX;
1077
1078                ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1079                                                 msecs_to_jiffies(ms));
1080
1081                if (ms == 0) {
1082                        SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1083                        SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1084                        dev_err(&msg->spi->dev,
1085                                "SPI transfer timed out\n");
1086                        return -ETIMEDOUT;
1087                }
1088        }
1089
1090        return 0;
1091}
1092
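/*
 * Worked example (illustrative): for a 1000-byte transfer at 1 MHz the
 * master path above computes ms = 8 * 1000 * 1000 / 1000000 = 8, then
 * ms += ms + 200 gives a timeout budget of 216 ms for a transfer whose
 * wire time is roughly 8 ms.
 */
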
1093static void _spi_transfer_delay_ns(u32 ns)
1094{
1095        if (!ns)
1096                return;
1097        if (ns <= 1000) {
1098                ndelay(ns);
1099        } else {
1100                u32 us = DIV_ROUND_UP(ns, 1000);
1101
1102                if (us <= 10)
1103                        udelay(us);
1104                else
1105                        usleep_range(us, us + DIV_ROUND_UP(us, 10));
1106        }
1107}
1108
1109static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1110                                          struct spi_transfer *xfer)
1111{
1112        u32 delay = xfer->cs_change_delay;
1113        u32 unit = xfer->cs_change_delay_unit;
1114        u32 hz;
1115
1116        /* return early on "fast" mode - for everything but USECS */
1117        if (!delay && unit != SPI_DELAY_UNIT_USECS)
1118                return;
1119
1120        switch (unit) {
1121        case SPI_DELAY_UNIT_USECS:
1122                /* for compatibility use default of 10us */
1123                if (!delay)
1124                        delay = 10000;
1125                else
1126                        delay *= 1000;
1127                break;
1128        case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
1129                break;
1130        case SPI_DELAY_UNIT_SCK:
 1131                /* if there is no effective speed known, then approximate
1132                 * by underestimating with half the requested hz
1133                 */
1134                hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1135                delay *= DIV_ROUND_UP(1000000000, hz);
1136                break;
1137        default:
1138                dev_err_once(&msg->spi->dev,
1139                             "Use of unsupported delay unit %i, using default of 10us\n",
1140                             xfer->cs_change_delay_unit);
1141                delay = 10000;
1142        }
1143        /* now sleep for the requested amount of time */
1144        _spi_transfer_delay_ns(delay);
1145}
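
/*
 * Worked example (illustrative): with cs_change_delay = 4 in
 * SPI_DELAY_UNIT_SCK units and an effective speed of 10 MHz, one SCK
 * period is DIV_ROUND_UP(1000000000, 10000000) = 100 ns, so a delay of
 * 4 * 100 = 400 ns is passed to _spi_transfer_delay_ns() above.
 */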
1146
1147/*
1148 * spi_transfer_one_message - Default implementation of transfer_one_message()
1149 *
1150 * This is a standard implementation of transfer_one_message() for
1151 * drivers which implement a transfer_one() operation.  It provides
1152 * standard handling of delays and chip select management.
1153 */
1154static int spi_transfer_one_message(struct spi_controller *ctlr,
1155                                    struct spi_message *msg)
1156{
1157        struct spi_transfer *xfer;
1158        bool keep_cs = false;
1159        int ret = 0;
1160        struct spi_statistics *statm = &ctlr->statistics;
1161        struct spi_statistics *stats = &msg->spi->statistics;
1162
1163        spi_set_cs(msg->spi, true);
1164
1165        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1166        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1167
1168        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1169                trace_spi_transfer_start(msg, xfer);
1170
1171                spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1172                spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1173
1174                if (xfer->tx_buf || xfer->rx_buf) {
1175                        reinit_completion(&ctlr->xfer_completion);
1176
1177                        ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1178                        if (ret < 0) {
1179                                SPI_STATISTICS_INCREMENT_FIELD(statm,
1180                                                               errors);
1181                                SPI_STATISTICS_INCREMENT_FIELD(stats,
1182                                                               errors);
1183                                dev_err(&msg->spi->dev,
1184                                        "SPI transfer failed: %d\n", ret);
1185                                goto out;
1186                        }
1187
1188                        if (ret > 0) {
1189                                ret = spi_transfer_wait(ctlr, msg, xfer);
1190                                if (ret < 0)
1191                                        msg->status = ret;
1192                        }
1193                } else {
1194                        if (xfer->len)
1195                                dev_err(&msg->spi->dev,
1196                                        "Bufferless transfer has length %u\n",
1197                                        xfer->len);
1198                }
1199
1200                trace_spi_transfer_stop(msg, xfer);
1201
1202                if (msg->status != -EINPROGRESS)
1203                        goto out;
1204
1205                if (xfer->delay_usecs)
1206                        _spi_transfer_delay_ns(xfer->delay_usecs * 1000);
1207
1208                if (xfer->cs_change) {
1209                        if (list_is_last(&xfer->transfer_list,
1210                                         &msg->transfers)) {
1211                                keep_cs = true;
1212                        } else {
1213                                spi_set_cs(msg->spi, false);
1214                                _spi_transfer_cs_change_delay(msg, xfer);
1215                                spi_set_cs(msg->spi, true);
1216                        }
1217                }
1218
1219                msg->actual_length += xfer->len;
1220        }
1221
1222out:
1223        if (ret != 0 || !keep_cs)
1224                spi_set_cs(msg->spi, false);
1225
1226        if (msg->status == -EINPROGRESS)
1227                msg->status = ret;
1228
1229        if (msg->status && ctlr->handle_err)
1230                ctlr->handle_err(ctlr, msg);
1231
1232        spi_res_release(ctlr, msg);
1233
1234        spi_finalize_current_message(ctlr);
1235
1236        return ret;
1237}
1238
1239/**
1240 * spi_finalize_current_transfer - report completion of a transfer
1241 * @ctlr: the controller reporting completion
1242 *
1243 * Called by SPI drivers using the core transfer_one_message()
1244 * implementation to notify it that the current interrupt driven
1245 * transfer has finished and the next one may be scheduled.
1246 */
1247void spi_finalize_current_transfer(struct spi_controller *ctlr)
1248{
1249        complete(&ctlr->xfer_completion);
1250}
1251EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
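
/*
 * Illustrative sketch (hypothetical "foo" controller driver, not part of
 * this file): a driver relying on the default spi_transfer_one_message()
 * implements transfer_one(), returns a positive value to ask the core to
 * wait, and signals completion from its interrupt handler:
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_transfer(ctlr, xfer);	// hypothetical helper
 *		return 1;			// core waits in spi_transfer_wait()
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct spi_controller *ctlr = data;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */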
1252
1253/**
1254 * __spi_pump_messages - function which processes spi message queue
1255 * @ctlr: controller to process queue for
1256 * @in_kthread: true if we are in the context of the message pump thread
1257 *
1258 * This function checks if there is any spi message in the queue that
 1259 * needs processing and, if so, calls out to the driver to initialize hardware
1260 * and transfer each message.
1261 *
1262 * Note that it is called both from the kthread itself and also from
1263 * inside spi_sync(); the queue extraction handling at the top of the
1264 * function should deal with this safely.
1265 */
1266static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1267{
1268        unsigned long flags;
1269        bool was_busy = false;
1270        int ret;
1271
1272        /* Lock queue */
1273        spin_lock_irqsave(&ctlr->queue_lock, flags);
1274
1275        /* Make sure we are not already running a message */
1276        if (ctlr->cur_msg) {
1277                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1278                return;
1279        }
1280
1281        /* If another context is idling the device then defer */
1282        if (ctlr->idling) {
1283                kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1284                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1285                return;
1286        }
1287
1288        /* Check if the queue is idle */
1289        if (list_empty(&ctlr->queue) || !ctlr->running) {
1290                if (!ctlr->busy) {
1291                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1292                        return;
1293                }
1294
1295                /* Only do teardown in the thread */
1296                if (!in_kthread) {
1297                        kthread_queue_work(&ctlr->kworker,
1298                                           &ctlr->pump_messages);
1299                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1300                        return;
1301                }
1302
1303                ctlr->busy = false;
1304                ctlr->idling = true;
1305                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1306
1307                kfree(ctlr->dummy_rx);
1308                ctlr->dummy_rx = NULL;
1309                kfree(ctlr->dummy_tx);
1310                ctlr->dummy_tx = NULL;
1311                if (ctlr->unprepare_transfer_hardware &&
1312                    ctlr->unprepare_transfer_hardware(ctlr))
1313                        dev_err(&ctlr->dev,
1314                                "failed to unprepare transfer hardware\n");
1315                if (ctlr->auto_runtime_pm) {
1316                        pm_runtime_mark_last_busy(ctlr->dev.parent);
1317                        pm_runtime_put_autosuspend(ctlr->dev.parent);
1318                }
1319                trace_spi_controller_idle(ctlr);
1320
1321                spin_lock_irqsave(&ctlr->queue_lock, flags);
1322                ctlr->idling = false;
1323                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1324                return;
1325        }
1326
1327        /* Extract head of queue */
1328        ctlr->cur_msg =
1329                list_first_entry(&ctlr->queue, struct spi_message, queue);
1330
1331        list_del_init(&ctlr->cur_msg->queue);
1332        if (ctlr->busy)
1333                was_busy = true;
1334        else
1335                ctlr->busy = true;
1336        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1337
1338        mutex_lock(&ctlr->io_mutex);
1339
1340        if (!was_busy && ctlr->auto_runtime_pm) {
1341                ret = pm_runtime_get_sync(ctlr->dev.parent);
1342                if (ret < 0) {
1343                        pm_runtime_put_noidle(ctlr->dev.parent);
1344                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
1345                                ret);
1346                        mutex_unlock(&ctlr->io_mutex);
1347                        return;
1348                }
1349        }
1350
1351        if (!was_busy)
1352                trace_spi_controller_busy(ctlr);
1353
1354        if (!was_busy && ctlr->prepare_transfer_hardware) {
1355                ret = ctlr->prepare_transfer_hardware(ctlr);
1356                if (ret) {
1357                        dev_err(&ctlr->dev,
1358                                "failed to prepare transfer hardware: %d\n",
1359                                ret);
1360
1361                        if (ctlr->auto_runtime_pm)
1362                                pm_runtime_put(ctlr->dev.parent);
1363
1364                        ctlr->cur_msg->status = ret;
1365                        spi_finalize_current_message(ctlr);
1366
1367                        mutex_unlock(&ctlr->io_mutex);
1368                        return;
1369                }
1370        }
1371
1372        trace_spi_message_start(ctlr->cur_msg);
1373
1374        if (ctlr->prepare_message) {
1375                ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
1376                if (ret) {
1377                        dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1378                                ret);
1379                        ctlr->cur_msg->status = ret;
1380                        spi_finalize_current_message(ctlr);
1381                        goto out;
1382                }
1383                ctlr->cur_msg_prepared = true;
1384        }
1385
1386        ret = spi_map_msg(ctlr, ctlr->cur_msg);
1387        if (ret) {
1388                ctlr->cur_msg->status = ret;
1389                spi_finalize_current_message(ctlr);
1390                goto out;
1391        }
1392
1393        ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
1394        if (ret) {
1395                dev_err(&ctlr->dev,
1396                        "failed to transfer one message from queue\n");
1397                goto out;
1398        }
1399
1400out:
1401        mutex_unlock(&ctlr->io_mutex);
1402
1403        /* Prod the scheduler in case transfer_one() was busy waiting */
1404        if (!ret)
1405                cond_resched();
1406}
1407
1408/**
1409 * spi_pump_messages - kthread work function which processes spi message queue
1410 * @work: pointer to kthread work struct contained in the controller struct
1411 */
1412static void spi_pump_messages(struct kthread_work *work)
1413{
1414        struct spi_controller *ctlr =
1415                container_of(work, struct spi_controller, pump_messages);
1416
1417        __spi_pump_messages(ctlr, true);
1418}
1419
1420/**
1421 * spi_set_thread_rt - set the controller to pump at realtime priority
1422 * @ctlr: controller to boost priority of
1423 *
1424 * This can be called because the controller requested realtime priority
1425 * (by setting the ->rt value before calling spi_register_controller()) or
1426 * because a device on the bus said that its transfers needed realtime
1427 * priority.
1428 *
1429 * NOTE: at the moment if any device on a bus says it needs realtime then
1430 * the thread will be at realtime priority for all transfers on that
1431 * controller.  If this eventually becomes a problem we may see if we can
1432 * find a way to boost the priority only temporarily during relevant
1433 * transfers.
1434 */
1435static void spi_set_thread_rt(struct spi_controller *ctlr)
1436{
1437        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1438
1439        dev_info(&ctlr->dev,
1440                "will run message pump with realtime priority\n");
1441        sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
1442}
1443
1444static int spi_init_queue(struct spi_controller *ctlr)
1445{
1446        ctlr->running = false;
1447        ctlr->busy = false;
1448
1449        kthread_init_worker(&ctlr->kworker);
1450        ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
1451                                         "%s", dev_name(&ctlr->dev));
1452        if (IS_ERR(ctlr->kworker_task)) {
1453                dev_err(&ctlr->dev, "failed to create message pump task\n");
1454                return PTR_ERR(ctlr->kworker_task);
1455        }
1456        kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1457
1458        /*
1459         * Controller config will indicate if this controller should run the
1460         * message pump with high (realtime) priority to reduce the transfer
1461         * latency on the bus by minimising the delay between a transfer
1462         * request and the scheduling of the message pump thread. Without this
1463         * setting the message pump thread will remain at default priority.
1464         */
1465        if (ctlr->rt)
1466                spi_set_thread_rt(ctlr);
1467
1468        return 0;
1469}
1470
1471/**
1472 * spi_get_next_queued_message() - called by driver to check for queued
1473 * messages
1474 * @ctlr: the controller to check for queued messages
1475 *
1476 * If there are more messages in the queue, the next message is returned from
1477 * this call.
1478 *
1479 * Return: the next message in the queue, else NULL if the queue is empty.
1480 */
1481struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1482{
1483        struct spi_message *next;
1484        unsigned long flags;
1485
1486        /* get a pointer to the next message, if any */
1487        spin_lock_irqsave(&ctlr->queue_lock, flags);
1488        next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1489                                        queue);
1490        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1491
1492        return next;
1493}
1494EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
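/*
 * Illustrative sketch, not part of this file: a controller driver that
 * finishes transfers from interrupt context might peek at the queue like
 * this; the "foo" names are hypothetical.
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *		struct spi_message *next;
 *
 *		next = spi_get_next_queued_message(ctlr);
 *		if (next)
 *			foo_spi_kick_hardware(ctlr, next);
 *
 *		return IRQ_HANDLED;
 *	}
 */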
1495
1496/**
1497 * spi_finalize_current_message() - the current message is complete
1498 * @ctlr: the controller to return the message to
1499 *
1500 * Called by the driver to notify the core that the message in the front of the
1501 * queue is complete and can be removed from the queue.
1502 */
1503void spi_finalize_current_message(struct spi_controller *ctlr)
1504{
1505        struct spi_message *mesg;
1506        unsigned long flags;
1507        int ret;
1508
1509        spin_lock_irqsave(&ctlr->queue_lock, flags);
1510        mesg = ctlr->cur_msg;
1511        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1512
1513        spi_unmap_msg(ctlr, mesg);
1514
1515        if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1516                ret = ctlr->unprepare_message(ctlr, mesg);
1517                if (ret) {
1518                        dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1519                                ret);
1520                }
1521        }
1522
1523        spin_lock_irqsave(&ctlr->queue_lock, flags);
1524        ctlr->cur_msg = NULL;
1525        ctlr->cur_msg_prepared = false;
1526        kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1527        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1528
1529        trace_spi_message_done(mesg);
1530
1531        mesg->state = NULL;
1532        if (mesg->complete)
1533                mesg->complete(mesg->context);
1534}
1535EXPORT_SYMBOL_GPL(spi_finalize_current_message);
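/*
 * Illustrative sketch, assuming a hypothetical "foo" driver: a driver that
 * implements ->transfer_one_message() sets the message status and then hands
 * the message back to the core once all transfers have been executed.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list)
 *			msg->actual_length += foo_do_one_transfer(ctlr, xfer);
 *
 *		msg->status = 0;
 *		spi_finalize_current_message(ctlr);
 *		return 0;
 *	}
 */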
1536
1537static int spi_start_queue(struct spi_controller *ctlr)
1538{
1539        unsigned long flags;
1540
1541        spin_lock_irqsave(&ctlr->queue_lock, flags);
1542
1543        if (ctlr->running || ctlr->busy) {
1544                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1545                return -EBUSY;
1546        }
1547
1548        ctlr->running = true;
1549        ctlr->cur_msg = NULL;
1550        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1551
1552        kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1553
1554        return 0;
1555}
1556
1557static int spi_stop_queue(struct spi_controller *ctlr)
1558{
1559        unsigned long flags;
1560        unsigned limit = 500;
1561        int ret = 0;
1562
1563        spin_lock_irqsave(&ctlr->queue_lock, flags);
1564
1565        /*
1566         * This is a bit lame, but is optimized for the common execution path.
1567         * A wait_queue on the ctlr->busy could be used, but then the common
1568         * execution path (pump_messages) would be required to call wake_up or
1569         * friends on every SPI message. Do this instead.
1570         */
1571        while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1572                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1573                usleep_range(10000, 11000);
1574                spin_lock_irqsave(&ctlr->queue_lock, flags);
1575        }
1576
1577        if (!list_empty(&ctlr->queue) || ctlr->busy)
1578                ret = -EBUSY;
1579        else
1580                ctlr->running = false;
1581
1582        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1583
1584        if (ret) {
1585                dev_warn(&ctlr->dev, "could not stop message queue\n");
1586                return ret;
1587        }
1588        return ret;
1589}
1590
1591static int spi_destroy_queue(struct spi_controller *ctlr)
1592{
1593        int ret;
1594
1595        ret = spi_stop_queue(ctlr);
1596
1597        /*
1598         * kthread_flush_worker will block until all work is done.
1599         * If the reason that stop_queue timed out is that the work will never
1600         * finish, then it does no good to flush or stop the thread, so
1601         * return anyway.
1602         */
1603        if (ret) {
1604                dev_err(&ctlr->dev, "problem destroying queue\n");
1605                return ret;
1606        }
1607
1608        kthread_flush_worker(&ctlr->kworker);
1609        kthread_stop(ctlr->kworker_task);
1610
1611        return 0;
1612}
1613
1614static int __spi_queued_transfer(struct spi_device *spi,
1615                                 struct spi_message *msg,
1616                                 bool need_pump)
1617{
1618        struct spi_controller *ctlr = spi->controller;
1619        unsigned long flags;
1620
1621        spin_lock_irqsave(&ctlr->queue_lock, flags);
1622
1623        if (!ctlr->running) {
1624                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1625                return -ESHUTDOWN;
1626        }
1627        msg->actual_length = 0;
1628        msg->status = -EINPROGRESS;
1629
1630        list_add_tail(&msg->queue, &ctlr->queue);
1631        if (!ctlr->busy && need_pump)
1632                kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1633
1634        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1635        return 0;
1636}
1637
1638/**
1639 * spi_queued_transfer - transfer function for queued transfers
1640 * @spi: spi device which is requesting transfer
1641 * @msg: spi message which is to be handled and queued to the driver queue
1642 *
1643 * Return: zero on success, else a negative error code.
1644 */
1645static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1646{
1647        return __spi_queued_transfer(spi, msg, true);
1648}
1649
1650static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1651{
1652        int ret;
1653
1654        ctlr->transfer = spi_queued_transfer;
1655        if (!ctlr->transfer_one_message)
1656                ctlr->transfer_one_message = spi_transfer_one_message;
1657
1658        /* Initialize and start queue */
1659        ret = spi_init_queue(ctlr);
1660        if (ret) {
1661                dev_err(&ctlr->dev, "problem initializing queue\n");
1662                goto err_init_queue;
1663        }
1664        ctlr->queued = true;
1665        ret = spi_start_queue(ctlr);
1666        if (ret) {
1667                dev_err(&ctlr->dev, "problem starting queue\n");
1668                goto err_start_queue;
1669        }
1670
1671        return 0;
1672
1673err_start_queue:
1674        spi_destroy_queue(ctlr);
1675err_init_queue:
1676        return ret;
1677}
1678
1679/**
1680 * spi_flush_queue - Send all pending messages in the queue from the caller's
1681 *                   context
1682 * @ctlr: controller to process queue for
1683 *
1684 * This should be used when one wants to ensure all pending messages have been
1685 * sent before doing something. It is used by the spi-mem code to make sure SPI
1686 * memory operations do not preempt regular SPI transfers that have been queued
1687 * before the spi-mem operation.
1688 */
1689void spi_flush_queue(struct spi_controller *ctlr)
1690{
1691        if (ctlr->transfer == spi_queued_transfer)
1692                __spi_pump_messages(ctlr, false);
1693}
1694
1695/*-------------------------------------------------------------------------*/
1696
1697#if defined(CONFIG_OF)
1698static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1699                           struct device_node *nc)
1700{
1701        u32 value;
1702        int rc;
1703
1704        /* Mode (clock phase/polarity/etc.) */
1705        if (of_property_read_bool(nc, "spi-cpha"))
1706                spi->mode |= SPI_CPHA;
1707        if (of_property_read_bool(nc, "spi-cpol"))
1708                spi->mode |= SPI_CPOL;
1709        if (of_property_read_bool(nc, "spi-3wire"))
1710                spi->mode |= SPI_3WIRE;
1711        if (of_property_read_bool(nc, "spi-lsb-first"))
1712                spi->mode |= SPI_LSB_FIRST;
1713
1714        /*
1715         * For descriptors associated with the device, polarity inversion is
1716         * handled in the gpiolib, so all chip selects are "active high" in
1717         * the logical sense; the gpiolib will invert the line if need be.
1718         */
1719        if (ctlr->use_gpio_descriptors)
1720                spi->mode |= SPI_CS_HIGH;
1721        else if (of_property_read_bool(nc, "spi-cs-high"))
1722                spi->mode |= SPI_CS_HIGH;
1723
1724        /* Device DUAL/QUAD mode */
1725        if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1726                switch (value) {
1727                case 1:
1728                        break;
1729                case 2:
1730                        spi->mode |= SPI_TX_DUAL;
1731                        break;
1732                case 4:
1733                        spi->mode |= SPI_TX_QUAD;
1734                        break;
1735                case 8:
1736                        spi->mode |= SPI_TX_OCTAL;
1737                        break;
1738                default:
1739                        dev_warn(&ctlr->dev,
1740                                "spi-tx-bus-width %d not supported\n",
1741                                value);
1742                        break;
1743                }
1744        }
1745
1746        if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1747                switch (value) {
1748                case 1:
1749                        break;
1750                case 2:
1751                        spi->mode |= SPI_RX_DUAL;
1752                        break;
1753                case 4:
1754                        spi->mode |= SPI_RX_QUAD;
1755                        break;
1756                case 8:
1757                        spi->mode |= SPI_RX_OCTAL;
1758                        break;
1759                default:
1760                        dev_warn(&ctlr->dev,
1761                                "spi-rx-bus-width %d not supported\n",
1762                                value);
1763                        break;
1764                }
1765        }
1766
1767        if (spi_controller_is_slave(ctlr)) {
1768                if (!of_node_name_eq(nc, "slave")) {
1769                        dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1770                                nc);
1771                        return -EINVAL;
1772                }
1773                return 0;
1774        }
1775
1776        /* Device address */
1777        rc = of_property_read_u32(nc, "reg", &value);
1778        if (rc) {
1779                dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
1780                        nc, rc);
1781                return rc;
1782        }
1783        spi->chip_select = value;
1784
1785        /* Device speed */
1786        rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1787        if (rc) {
1788                dev_err(&ctlr->dev,
1789                        "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
1790                return rc;
1791        }
1792        spi->max_speed_hz = value;
1793
1794        return 0;
1795}
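/*
 * Illustrative device tree fragment matching the properties parsed above;
 * the node, compatible string and values below are made-up examples, not a
 * binding definition.
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <25000000>;
 *			spi-cpol;
 *			spi-cpha;
 *			spi-rx-bus-width = <4>;
 *		};
 *	};
 */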
1796
1797static struct spi_device *
1798of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
1799{
1800        struct spi_device *spi;
1801        int rc;
1802
1803        /* Alloc an spi_device */
1804        spi = spi_alloc_device(ctlr);
1805        if (!spi) {
1806                dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
1807                rc = -ENOMEM;
1808                goto err_out;
1809        }
1810
1811        /* Select device driver */
1812        rc = of_modalias_node(nc, spi->modalias,
1813                                sizeof(spi->modalias));
1814        if (rc < 0) {
1815                dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
1816                goto err_out;
1817        }
1818
1819        rc = of_spi_parse_dt(ctlr, spi, nc);
1820        if (rc)
1821                goto err_out;
1822
1823        /* Store a pointer to the node in the device structure */
1824        of_node_get(nc);
1825        spi->dev.of_node = nc;
1826
1827        /* Register the new device */
1828        rc = spi_add_device(spi);
1829        if (rc) {
1830                dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
1831                goto err_of_node_put;
1832        }
1833
1834        return spi;
1835
1836err_of_node_put:
1837        of_node_put(nc);
1838err_out:
1839        spi_dev_put(spi);
1840        return ERR_PTR(rc);
1841}
1842
1843/**
1844 * of_register_spi_devices() - Register child devices onto the SPI bus
1845 * @ctlr:       Pointer to spi_controller device
1846 *
1847 * Registers an spi_device for each child node of the controller node which
1848 * represents a valid SPI slave.
1849 */
1850static void of_register_spi_devices(struct spi_controller *ctlr)
1851{
1852        struct spi_device *spi;
1853        struct device_node *nc;
1854
1855        if (!ctlr->dev.of_node)
1856                return;
1857
1858        for_each_available_child_of_node(ctlr->dev.of_node, nc) {
1859                if (of_node_test_and_set_flag(nc, OF_POPULATED))
1860                        continue;
1861                spi = of_register_spi_device(ctlr, nc);
1862                if (IS_ERR(spi)) {
1863                        dev_warn(&ctlr->dev,
1864                                 "Failed to create SPI device for %pOF\n", nc);
1865                        of_node_clear_flag(nc, OF_POPULATED);
1866                }
1867        }
1868}
1869#else
1870static void of_register_spi_devices(struct spi_controller *ctlr) { }
1871#endif
1872
1873#ifdef CONFIG_ACPI
1874struct acpi_spi_lookup {
1875        struct spi_controller   *ctlr;
1876        u32                     max_speed_hz;
1877        u32                     mode;
1878        int                     irq;
1879        u8                      bits_per_word;
1880        u8                      chip_select;
1881};
1882
1883static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
1884                                            struct acpi_spi_lookup *lookup)
1885{
1886        const union acpi_object *obj;
1887
1888        if (!x86_apple_machine)
1889                return;
1890
1891        if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
1892            && obj->buffer.length >= 4)
1893                lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
1894
1895        if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
1896            && obj->buffer.length == 8)
1897                lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
1898
1899        if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
1900            && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
1901                lookup->mode |= SPI_LSB_FIRST;
1902
1903        if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
1904            && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1905                lookup->mode |= SPI_CPOL;
1906
1907        if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
1908            && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1909                lookup->mode |= SPI_CPHA;
1910}
1911
1912static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1913{
1914        struct acpi_spi_lookup *lookup = data;
1915        struct spi_controller *ctlr = lookup->ctlr;
1916
1917        if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1918                struct acpi_resource_spi_serialbus *sb;
1919                acpi_handle parent_handle;
1920                acpi_status status;
1921
1922                sb = &ares->data.spi_serial_bus;
1923                if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1924
1925                        status = acpi_get_handle(NULL,
1926                                                 sb->resource_source.string_ptr,
1927                                                 &parent_handle);
1928
1929                        if (ACPI_FAILURE(status) ||
1930                            ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
1931                                return -ENODEV;
1932
1933                        /*
1934                         * ACPI DeviceSelection numbering is handled by the
1935                         * host controller driver in Windows and can vary
1936                         * from driver to driver. In Linux we always expect
1937                         * 0 .. max - 1 so we need to ask the driver to
1938                         * translate between the two schemes.
1939                         */
1940                        if (ctlr->fw_translate_cs) {
1941                                int cs = ctlr->fw_translate_cs(ctlr,
1942                                                sb->device_selection);
1943                                if (cs < 0)
1944                                        return cs;
1945                                lookup->chip_select = cs;
1946                        } else {
1947                                lookup->chip_select = sb->device_selection;
1948                        }
1949
1950                        lookup->max_speed_hz = sb->connection_speed;
1951
1952                        if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1953                                lookup->mode |= SPI_CPHA;
1954                        if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1955                                lookup->mode |= SPI_CPOL;
1956                        if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1957                                lookup->mode |= SPI_CS_HIGH;
1958                }
1959        } else if (lookup->irq < 0) {
1960                struct resource r;
1961
1962                if (acpi_dev_resource_interrupt(ares, 0, &r))
1963                        lookup->irq = r.start;
1964        }
1965
1966        /* Always tell the ACPI core to skip this resource */
1967        return 1;
1968}
1969
1970static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
1971                                            struct acpi_device *adev)
1972{
1973        acpi_handle parent_handle = NULL;
1974        struct list_head resource_list;
1975        struct acpi_spi_lookup lookup = {};
1976        struct spi_device *spi;
1977        int ret;
1978
1979        if (acpi_bus_get_status(adev) || !adev->status.present ||
1980            acpi_device_enumerated(adev))
1981                return AE_OK;
1982
1983        lookup.ctlr             = ctlr;
1984        lookup.irq              = -1;
1985
1986        INIT_LIST_HEAD(&resource_list);
1987        ret = acpi_dev_get_resources(adev, &resource_list,
1988                                     acpi_spi_add_resource, &lookup);
1989        acpi_dev_free_resource_list(&resource_list);
1990
1991        if (ret < 0)
1992                /* found SPI in _CRS but it points to another controller */
1993                return AE_OK;
1994
1995        if (!lookup.max_speed_hz &&
1996            !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
1997            ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
1998                /* Apple does not use _CRS but nested devices for SPI slaves */
1999                acpi_spi_parse_apple_properties(adev, &lookup);
2000        }
2001
2002        if (!lookup.max_speed_hz)
2003                return AE_OK;
2004
2005        spi = spi_alloc_device(ctlr);
2006        if (!spi) {
2007                dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
2008                        dev_name(&adev->dev));
2009                return AE_NO_MEMORY;
2010        }
2011
2012        ACPI_COMPANION_SET(&spi->dev, adev);
2013        spi->max_speed_hz       = lookup.max_speed_hz;
2014        spi->mode               = lookup.mode;
2015        spi->irq                = lookup.irq;
2016        spi->bits_per_word      = lookup.bits_per_word;
2017        spi->chip_select        = lookup.chip_select;
2018
2019        acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2020                          sizeof(spi->modalias));
2021
2022        if (spi->irq < 0)
2023                spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2024
2025        acpi_device_set_enumerated(adev);
2026
2027        adev->power.flags.ignore_parent = true;
2028        if (spi_add_device(spi)) {
2029                adev->power.flags.ignore_parent = false;
2030                dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2031                        dev_name(&adev->dev));
2032                spi_dev_put(spi);
2033        }
2034
2035        return AE_OK;
2036}
2037
2038static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2039                                       void *data, void **return_value)
2040{
2041        struct spi_controller *ctlr = data;
2042        struct acpi_device *adev;
2043
2044        if (acpi_bus_get_device(handle, &adev))
2045                return AE_OK;
2046
2047        return acpi_register_spi_device(ctlr, adev);
2048}
2049
2050#define SPI_ACPI_ENUMERATE_MAX_DEPTH            32
2051
2052static void acpi_register_spi_devices(struct spi_controller *ctlr)
2053{
2054        acpi_status status;
2055        acpi_handle handle;
2056
2057        handle = ACPI_HANDLE(ctlr->dev.parent);
2058        if (!handle)
2059                return;
2060
2061        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2062                                     SPI_ACPI_ENUMERATE_MAX_DEPTH,
2063                                     acpi_spi_add_device, NULL, ctlr, NULL);
2064        if (ACPI_FAILURE(status))
2065                dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2066}
2067#else
2068static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2069#endif /* CONFIG_ACPI */
2070
2071static void spi_controller_release(struct device *dev)
2072{
2073        struct spi_controller *ctlr;
2074
2075        ctlr = container_of(dev, struct spi_controller, dev);
2076        kfree(ctlr);
2077}
2078
2079static struct class spi_master_class = {
2080        .name           = "spi_master",
2081        .owner          = THIS_MODULE,
2082        .dev_release    = spi_controller_release,
2083        .dev_groups     = spi_master_groups,
2084};
2085
2086#ifdef CONFIG_SPI_SLAVE
2087/**
2088 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2089 *                   controller
2090 * @spi: device used for the current transfer
2091 */
2092int spi_slave_abort(struct spi_device *spi)
2093{
2094        struct spi_controller *ctlr = spi->controller;
2095
2096        if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2097                return ctlr->slave_abort(ctlr);
2098
2099        return -ENOTSUPP;
2100}
2101EXPORT_SYMBOL_GPL(spi_slave_abort);
2102
2103static int match_true(struct device *dev, void *data)
2104{
2105        return 1;
2106}
2107
2108static ssize_t spi_slave_show(struct device *dev,
2109                              struct device_attribute *attr, char *buf)
2110{
2111        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2112                                                   dev);
2113        struct device *child;
2114
2115        child = device_find_child(&ctlr->dev, NULL, match_true);
2116        return sprintf(buf, "%s\n",
2117                       child ? to_spi_device(child)->modalias : NULL);
2118}
2119
2120static ssize_t spi_slave_store(struct device *dev,
2121                               struct device_attribute *attr, const char *buf,
2122                               size_t count)
2123{
2124        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2125                                                   dev);
2126        struct spi_device *spi;
2127        struct device *child;
2128        char name[32];
2129        int rc;
2130
2131        rc = sscanf(buf, "%31s", name);
2132        if (rc != 1 || !name[0])
2133                return -EINVAL;
2134
2135        child = device_find_child(&ctlr->dev, NULL, match_true);
2136        if (child) {
2137                /* Remove registered slave */
2138                device_unregister(child);
2139                put_device(child);
2140        }
2141
2142        if (strcmp(name, "(null)")) {
2143                /* Register new slave */
2144                spi = spi_alloc_device(ctlr);
2145                if (!spi)
2146                        return -ENOMEM;
2147
2148                strlcpy(spi->modalias, name, sizeof(spi->modalias));
2149
2150                rc = spi_add_device(spi);
2151                if (rc) {
2152                        spi_dev_put(spi);
2153                        return rc;
2154                }
2155        }
2156
2157        return count;
2158}
2159
2160static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
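/*
 * Illustrative userspace usage of the "slave" attribute; the controller name
 * and protocol driver below are only examples. Writing a modalias binds a
 * slave protocol device, writing "(null)" removes it again:
 *
 *	echo spidev > /sys/class/spi_slave/spi0/slave
 *	echo "(null)" > /sys/class/spi_slave/spi0/slave
 */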
2161
2162static struct attribute *spi_slave_attrs[] = {
2163        &dev_attr_slave.attr,
2164        NULL,
2165};
2166
2167static const struct attribute_group spi_slave_group = {
2168        .attrs = spi_slave_attrs,
2169};
2170
2171static const struct attribute_group *spi_slave_groups[] = {
2172        &spi_controller_statistics_group,
2173        &spi_slave_group,
2174        NULL,
2175};
2176
2177static struct class spi_slave_class = {
2178        .name           = "spi_slave",
2179        .owner          = THIS_MODULE,
2180        .dev_release    = spi_controller_release,
2181        .dev_groups     = spi_slave_groups,
2182};
2183#else
2184extern struct class spi_slave_class;    /* dummy */
2185#endif
2186
2187/**
2188 * __spi_alloc_controller - allocate an SPI master or slave controller
2189 * @dev: the controller, possibly using the platform_bus
2190 * @size: how much zeroed driver-private data to allocate; the pointer to this
2191 *      memory is in the driver_data field of the returned device,
2192 *      accessible with spi_controller_get_devdata().
2193 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2194 *      slave (true) controller
2195 * Context: can sleep
2196 *
2197 * This call is used only by SPI controller drivers, which are the
2198 * only ones directly touching chip registers.  It's how they allocate
2199 * an spi_controller structure, prior to calling spi_register_controller().
2200 *
2201 * This must be called from context that can sleep.
2202 *
2203 * The caller is responsible for assigning the bus number and initializing the
2204 * controller's methods before calling spi_register_controller(); and (after
2205 * errors adding the device) calling spi_controller_put() to prevent a memory
2206 * leak.
2207 *
2208 * Return: the SPI controller structure on success, else NULL.
2209 */
2210struct spi_controller *__spi_alloc_controller(struct device *dev,
2211                                              unsigned int size, bool slave)
2212{
2213        struct spi_controller   *ctlr;
2214
2215        if (!dev)
2216                return NULL;
2217
2218        ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
2219        if (!ctlr)
2220                return NULL;
2221
2222        device_initialize(&ctlr->dev);
2223        ctlr->bus_num = -1;
2224        ctlr->num_chipselect = 1;
2225        ctlr->slave = slave;
2226        if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2227                ctlr->dev.class = &spi_slave_class;
2228        else
2229                ctlr->dev.class = &spi_master_class;
2230        ctlr->dev.parent = dev;
2231        pm_suspend_ignore_children(&ctlr->dev, true);
2232        spi_controller_set_devdata(ctlr, &ctlr[1]);
2233
2234        return ctlr;
2235}
2236EXPORT_SYMBOL_GPL(__spi_alloc_controller);
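/*
 * Illustrative sketch, assuming a hypothetical "foo" platform driver:
 * allocate a master with room for driver-private data, then use the devdata
 * pointer that was set up above.
 *
 *	struct foo_spi {
 *		void __iomem *regs;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		struct foo_spi *foo;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(*foo));
 *		if (!ctlr)
 *			return -ENOMEM;
 *		foo = spi_controller_get_devdata(ctlr);
 *		...
 *	}
 */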
2237
2238#ifdef CONFIG_OF
2239static int of_spi_register_master(struct spi_controller *ctlr)
2240{
2241        int nb, i, *cs;
2242        struct device_node *np = ctlr->dev.of_node;
2243
2244        if (!np)
2245                return 0;
2246
2247        nb = of_gpio_named_count(np, "cs-gpios");
2248        ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2249
2250        /* Return error only for an incorrectly formed cs-gpios property */
2251        if (nb == 0 || nb == -ENOENT)
2252                return 0;
2253        else if (nb < 0)
2254                return nb;
2255
2256        cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2257                          GFP_KERNEL);
2258        ctlr->cs_gpios = cs;
2259
2260        if (!ctlr->cs_gpios)
2261                return -ENOMEM;
2262
2263        for (i = 0; i < ctlr->num_chipselect; i++)
2264                cs[i] = -ENOENT;
2265
2266        for (i = 0; i < nb; i++)
2267                cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2268
2269        return 0;
2270}
2271#else
2272static int of_spi_register_master(struct spi_controller *ctlr)
2273{
2274        return 0;
2275}
2276#endif
2277
2278/**
2279 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2280 * @ctlr: The SPI master to grab GPIO descriptors for
2281 */
2282static int spi_get_gpio_descs(struct spi_controller *ctlr)
2283{
2284        int nb, i;
2285        struct gpio_desc **cs;
2286        struct device *dev = &ctlr->dev;
2287
2288        nb = gpiod_count(dev, "cs");
2289        ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2290
2291        /* No GPIOs at all is fine, else return the error */
2292        if (nb == 0 || nb == -ENOENT)
2293                return 0;
2294        else if (nb < 0)
2295                return nb;
2296
2297        cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2298                          GFP_KERNEL);
2299        if (!cs)
2300                return -ENOMEM;
2301        ctlr->cs_gpiods = cs;
2302
2303        for (i = 0; i < nb; i++) {
2304                /*
2305                 * Most chipselects are active low, the inverted
2306                 * semantics are handled by special quirks in gpiolib,
2307                 * so initializing them as GPIOD_OUT_LOW here means
2308                 * "unasserted"; in most cases this will drive the physical
2309                 * line high.
2310                 */
2311                cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2312                                                      GPIOD_OUT_LOW);
2313                if (IS_ERR(cs[i]))
2314                        return PTR_ERR(cs[i]);
2315
2316                if (cs[i]) {
2317                        /*
2318                         * If we find a CS GPIO, name it after the device and
2319                         * chip select line.
2320                         */
2321                        char *gpioname;
2322
2323                        gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2324                                                  dev_name(dev), i);
2325                        if (!gpioname)
2326                                return -ENOMEM;
2327                        gpiod_set_consumer_name(cs[i], gpioname);
2328                }
2329        }
2330
2331        return 0;
2332}
2333
2334static int spi_controller_check_ops(struct spi_controller *ctlr)
2335{
2336        /*
2337         * The controller may implement only the high-level SPI-memory like
2338         * operations if it does not support regular SPI transfers, and this is
2339         * a valid use case.
2340         * If ->mem_ops is NULL, we request that at least one of the
2341         * ->transfer_xxx() methods be implemented.
2342         */
2343        if (ctlr->mem_ops) {
2344                if (!ctlr->mem_ops->exec_op)
2345                        return -EINVAL;
2346        } else if (!ctlr->transfer && !ctlr->transfer_one &&
2347                   !ctlr->transfer_one_message) {
2348                return -EINVAL;
2349        }
2350
2351        return 0;
2352}
2353
2354/**
2355 * spi_register_controller - register SPI master or slave controller
2356 * @ctlr: initialized master, originally from spi_alloc_master() or
2357 *      spi_alloc_slave()
2358 * Context: can sleep
2359 *
2360 * SPI controllers connect to their drivers using some non-SPI bus,
2361 * such as the platform bus.  The final stage of probe() in that code
2362 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2363 *
2364 * SPI controllers use board-specific (often SoC-specific) bus numbers,
2365 * and board-specific addressing for SPI devices combines those numbers
2366 * with chip select numbers.  Since SPI does not directly support dynamic
2367 * device identification, boards need configuration tables telling which
2368 * chip is at which address.
2369 *
2370 * This must be called from context that can sleep.  It returns zero on
2371 * success, else a negative error code (dropping the controller's refcount).
2372 * After a successful return, the caller is responsible for calling
2373 * spi_unregister_controller().
2374 *
2375 * Return: zero on success, else a negative error code.
2376 */
2377int spi_register_controller(struct spi_controller *ctlr)
2378{
2379        struct device           *dev = ctlr->dev.parent;
2380        struct boardinfo        *bi;
2381        int                     status;
2382        int                     id, first_dynamic;
2383
2384        if (!dev)
2385                return -ENODEV;
2386
2387        /*
2388         * Make sure all necessary hooks are implemented before registering
2389         * the SPI controller.
2390         */
2391        status = spi_controller_check_ops(ctlr);
2392        if (status)
2393                return status;
2394
2395        if (ctlr->bus_num >= 0) {
2396                /* devices with a fixed bus num must check-in with the num */
2397                mutex_lock(&board_lock);
2398                id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2399                        ctlr->bus_num + 1, GFP_KERNEL);
2400                mutex_unlock(&board_lock);
2401                if (WARN(id < 0, "couldn't get idr"))
2402                        return id == -ENOSPC ? -EBUSY : id;
2403                ctlr->bus_num = id;
2404        } else if (ctlr->dev.of_node) {
2405                /* allocate dynamic bus number using Linux idr */
2406                id = of_alias_get_id(ctlr->dev.of_node, "spi");
2407                if (id >= 0) {
2408                        ctlr->bus_num = id;
2409                        mutex_lock(&board_lock);
2410                        id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2411                                       ctlr->bus_num + 1, GFP_KERNEL);
2412                        mutex_unlock(&board_lock);
2413                        if (WARN(id < 0, "couldn't get idr"))
2414                                return id == -ENOSPC ? -EBUSY : id;
2415                }
2416        }
2417        if (ctlr->bus_num < 0) {
2418                first_dynamic = of_alias_get_highest_id("spi");
2419                if (first_dynamic < 0)
2420                        first_dynamic = 0;
2421                else
2422                        first_dynamic++;
2423
2424                mutex_lock(&board_lock);
2425                id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2426                               0, GFP_KERNEL);
2427                mutex_unlock(&board_lock);
2428                if (WARN(id < 0, "couldn't get idr"))
2429                        return id;
2430                ctlr->bus_num = id;
2431        }
2432        INIT_LIST_HEAD(&ctlr->queue);
2433        spin_lock_init(&ctlr->queue_lock);
2434        spin_lock_init(&ctlr->bus_lock_spinlock);
2435        mutex_init(&ctlr->bus_lock_mutex);
2436        mutex_init(&ctlr->io_mutex);
2437        ctlr->bus_lock_flag = 0;
2438        init_completion(&ctlr->xfer_completion);
2439        if (!ctlr->max_dma_len)
2440                ctlr->max_dma_len = INT_MAX;
2441
2442        /* register the device, then userspace will see it.
2443         * registration fails if the bus ID is in use.
2444         */
2445        dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2446
2447        if (!spi_controller_is_slave(ctlr)) {
2448                if (ctlr->use_gpio_descriptors) {
2449                        status = spi_get_gpio_descs(ctlr);
2450                        if (status)
2451                                return status;
2452                        /*
2453                         * A controller using GPIO descriptors always
2454                         * supports SPI_CS_HIGH if need be.
2455                         */
2456                        ctlr->mode_bits |= SPI_CS_HIGH;
2457                } else {
2458                        /* Legacy code path for GPIOs from DT */
2459                        status = of_spi_register_master(ctlr);
2460                        if (status)
2461                                return status;
2462                }
2463        }
2464
2465        /*
2466         * Even if it's just one always-selected device, there must
2467         * be at least one chipselect.
2468         */
2469        if (!ctlr->num_chipselect)
2470                return -EINVAL;
2471
2472        status = device_add(&ctlr->dev);
2473        if (status < 0) {
2474                /* free bus id */
2475                mutex_lock(&board_lock);
2476                idr_remove(&spi_master_idr, ctlr->bus_num);
2477                mutex_unlock(&board_lock);
2478                goto done;
2479        }
2480        dev_dbg(dev, "registered %s %s\n",
2481                        spi_controller_is_slave(ctlr) ? "slave" : "master",
2482                        dev_name(&ctlr->dev));
2483
2484        /*
2485         * If we're using a queued driver, start the queue. Note that we don't
2486         * need the queueing logic if the driver is only supporting high-level
2487         * memory operations.
2488         */
2489        if (ctlr->transfer) {
2490                dev_info(dev, "controller is unqueued, this is deprecated\n");
2491        } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2492                status = spi_controller_initialize_queue(ctlr);
2493                if (status) {
2494                        device_del(&ctlr->dev);
2495                        /* free bus id */
2496                        mutex_lock(&board_lock);
2497                        idr_remove(&spi_master_idr, ctlr->bus_num);
2498                        mutex_unlock(&board_lock);
2499                        goto done;
2500                }
2501        }
2502        /* add statistics */
2503        spin_lock_init(&ctlr->statistics.lock);
2504
2505        mutex_lock(&board_lock);
2506        list_add_tail(&ctlr->list, &spi_controller_list);
2507        list_for_each_entry(bi, &board_list, list)
2508                spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2509        mutex_unlock(&board_lock);
2510
2511        /* Register devices from the device tree and ACPI */
2512        of_register_spi_devices(ctlr);
2513        acpi_register_spi_devices(ctlr);
2514done:
2515        return status;
2516}
2517EXPORT_SYMBOL_GPL(spi_register_controller);
2518
2519static void devm_spi_unregister(struct device *dev, void *res)
2520{
2521        spi_unregister_controller(*(struct spi_controller **)res);
2522}
2523
2524/**
2525 * devm_spi_register_controller - register managed SPI master or slave
2526 *      controller
2527 * @dev:    device managing SPI controller
2528 * @ctlr: initialized controller, originally from spi_alloc_master() or
2529 *      spi_alloc_slave()
2530 * Context: can sleep
2531 *
2532 * Register an SPI controller, as with spi_register_controller(), which will
2533 * automatically be unregistered and freed.
2534 *
2535 * Return: zero on success, else a negative error code.
2536 */
2537int devm_spi_register_controller(struct device *dev,
2538                                 struct spi_controller *ctlr)
2539{
2540        struct spi_controller **ptr;
2541        int ret;
2542
2543        ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2544        if (!ptr)
2545                return -ENOMEM;
2546
2547        ret = spi_register_controller(ctlr);
2548        if (!ret) {
2549                *ptr = ctlr;
2550                devres_add(dev, ptr);
2551        } else {
2552                devres_free(ptr);
2553        }
2554
2555        return ret;
2556}
2557EXPORT_SYMBOL_GPL(devm_spi_register_controller);
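/*
 * Illustrative continuation of the hypothetical probe sketch above: fill in
 * the controller methods and register it with devres, so it is unregistered
 * automatically when the parent device is unbound.
 *
 *		int ret;
 *
 *		ctlr->num_chipselect = 2;
 *		ctlr->transfer_one = foo_transfer_one;
 *		platform_set_drvdata(pdev, ctlr);
 *
 *		ret = devm_spi_register_controller(&pdev->dev, ctlr);
 *		if (ret)
 *			spi_controller_put(ctlr);
 *		return ret;
 */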
2558
2559static int __unregister(struct device *dev, void *null)
2560{
2561        spi_unregister_device(to_spi_device(dev));
2562        return 0;
2563}
2564
2565/**
2566 * spi_unregister_controller - unregister SPI master or slave controller
2567 * @ctlr: the controller being unregistered
2568 * Context: can sleep
2569 *
2570 * This call is used only by SPI controller drivers, which are the
2571 * only ones directly touching chip registers.
2572 *
2573 * This must be called from context that can sleep.
2574 *
2575 * Note that this function also drops a reference to the controller.
2576 */
2577void spi_unregister_controller(struct spi_controller *ctlr)
2578{
2579        struct spi_controller *found;
2580        int id = ctlr->bus_num;
2581
2582        /* First make sure that this controller was ever added */
2583        mutex_lock(&board_lock);
2584        found = idr_find(&spi_master_idr, id);
2585        mutex_unlock(&board_lock);
2586        if (ctlr->queued) {
2587                if (spi_destroy_queue(ctlr))
2588                        dev_err(&ctlr->dev, "queue remove failed\n");
2589        }
2590        mutex_lock(&board_lock);
2591        list_del(&ctlr->list);
2592        mutex_unlock(&board_lock);
2593
2594        device_for_each_child(&ctlr->dev, NULL, __unregister);
2595        device_unregister(&ctlr->dev);
2596        /* free bus id */
2597        mutex_lock(&board_lock);
2598        if (found == ctlr)
2599                idr_remove(&spi_master_idr, id);
2600        mutex_unlock(&board_lock);
2601}
2602EXPORT_SYMBOL_GPL(spi_unregister_controller);
2603
2604int spi_controller_suspend(struct spi_controller *ctlr)
2605{
2606        int ret;
2607
2608        /* Basically no-ops for non-queued controllers */
2609        if (!ctlr->queued)
2610                return 0;
2611
2612        ret = spi_stop_queue(ctlr);
2613        if (ret)
2614                dev_err(&ctlr->dev, "queue stop failed\n");
2615
2616        return ret;
2617}
2618EXPORT_SYMBOL_GPL(spi_controller_suspend);
2619
2620int spi_controller_resume(struct spi_controller *ctlr)
2621{
2622        int ret;
2623
2624        if (!ctlr->queued)
2625                return 0;
2626
2627        ret = spi_start_queue(ctlr);
2628        if (ret)
2629                dev_err(&ctlr->dev, "queue restart failed\n");
2630
2631        return ret;
2632}
2633EXPORT_SYMBOL_GPL(spi_controller_resume);
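/*
 * Illustrative sketch, assuming a hypothetical "foo" driver: controller
 * drivers commonly wrap the two helpers above in their dev_pm_ops so the
 * message queue is quiesced across system sleep.
 *
 *	static int __maybe_unused foo_suspend(struct device *dev)
 *	{
 *		return spi_controller_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int __maybe_unused foo_resume(struct device *dev)
 *	{
 *		return spi_controller_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */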
2634
2635static int __spi_controller_match(struct device *dev, const void *data)
2636{
2637        struct spi_controller *ctlr;
2638        const u16 *bus_num = data;
2639
2640        ctlr = container_of(dev, struct spi_controller, dev);
2641        return ctlr->bus_num == *bus_num;
2642}
2643
2644/**
2645 * spi_busnum_to_master - look up master associated with bus_num
2646 * @bus_num: the master's bus number
2647 * Context: can sleep
2648 *
2649 * This call may be used with devices that are registered after
2650 * arch init time.  It returns a refcounted pointer to the relevant
2651 * spi_controller (which the caller must release), or NULL if there is
2652 * no such master registered.
2653 *
2654 * Return: the SPI master structure on success, else NULL.
2655 */
2656struct spi_controller *spi_busnum_to_master(u16 bus_num)
2657{
2658        struct device           *dev;
2659        struct spi_controller   *ctlr = NULL;
2660
2661        dev = class_find_device(&spi_master_class, NULL, &bus_num,
2662                                __spi_controller_match);
2663        if (dev)
2664                ctlr = container_of(dev, struct spi_controller, dev);
2665        /* reference got in class_find_device */
2666        return ctlr;
2667}
2668EXPORT_SYMBOL_GPL(spi_busnum_to_master);
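/*
 * Illustrative usage from hypothetical board setup code: the reference
 * obtained here must be dropped with spi_controller_put() when the caller is
 * done with the controller.
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(1);
 *
 *	if (ctlr) {
 *		spi_new_device(ctlr, &foo_board_info);
 *		spi_controller_put(ctlr);
 *	}
 */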
2669
2670/*-------------------------------------------------------------------------*/
2671
2672/* Core methods for SPI resource management */
2673
2674/**
2675 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2676 *                 during the processing of a spi_message while using
2677 *                 spi_transfer_one
2678 * @spi:     the spi device for which we allocate memory
2679 * @release: the release code to execute for this resource
2680 * @size:    size to alloc and return
2681 * @gfp:     GFP allocation flags
2682 *
2683 * Return: the pointer to the allocated data, or NULL in case of an allocation failure
2684 *
2685 * This may get enhanced in the future to allocate from a memory pool
2686 * of the @spi_device or @spi_controller to avoid repeated allocations.
2687 */
2688void *spi_res_alloc(struct spi_device *spi,
2689                    spi_res_release_t release,
2690                    size_t size, gfp_t gfp)
2691{
2692        struct spi_res *sres;
2693
2694        sres = kzalloc(sizeof(*sres) + size, gfp);
2695        if (!sres)
2696                return NULL;
2697
2698        INIT_LIST_HEAD(&sres->entry);
2699        sres->release = release;
2700
2701        return sres->data;
2702}
2703EXPORT_SYMBOL_GPL(spi_res_alloc);
2704
2705/**
2706 * spi_res_free - free an spi resource
2707 * @res: pointer to the custom data of a resource
2708 *
2709 */
2710void spi_res_free(void *res)
2711{
2712        struct spi_res *sres = container_of(res, struct spi_res, data);
2713
2714        if (!res)
2715                return;
2716
2717        WARN_ON(!list_empty(&sres->entry));
2718        kfree(sres);
2719}
2720EXPORT_SYMBOL_GPL(spi_res_free);
2721
2722/**
2723 * spi_res_add - add a spi_res to the spi_message
2724 * @message: the spi message
2725 * @res:     the spi_resource
2726 */
2727void spi_res_add(struct spi_message *message, void *res)
2728{
2729        struct spi_res *sres = container_of(res, struct spi_res, data);
2730
2731        WARN_ON(!list_empty(&sres->entry));
2732        list_add_tail(&sres->entry, &message->resources);
2733}
2734EXPORT_SYMBOL_GPL(spi_res_add);
2735
2736/**
2737 * spi_res_release - release all spi resources for this message
2738 * @ctlr:  the @spi_controller
2739 * @message: the @spi_message
2740 */
2741void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2742{
2743        struct spi_res *res, *tmp;
2744
2745        list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
2746                if (res->release)
2747                        res->release(ctlr, message, res->data);
2748
2749                list_del(&res->entry);
2750
2751                kfree(res);
2752        }
2753}
2754EXPORT_SYMBOL_GPL(spi_res_release);
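/*
 * Illustrative sketch of the spi_res life cycle; the "foo" names are made up.
 * A helper can attach data to a message and have it torn down automatically
 * when the message finishes:
 *
 *	static void foo_res_release(struct spi_controller *ctlr,
 *				    struct spi_message *msg, void *res)
 *	{
 *		... undo whatever was set up for this message ...
 *	}
 *
 *	state = spi_res_alloc(msg->spi, foo_res_release,
 *			      sizeof(struct foo_state), GFP_KERNEL);
 *	if (state)
 *		spi_res_add(msg, state);
 *
 * The core later calls spi_res_release() on message completion, which runs
 * foo_res_release() and then frees the allocation.
 */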
2755
2756/*-------------------------------------------------------------------------*/
2757
2758/* Core methods for spi_message alterations */
2759
2760static void __spi_replace_transfers_release(struct spi_controller *ctlr,
2761                                            struct spi_message *msg,
2762                                            void *res)
2763{
2764        struct spi_replaced_transfers *rxfer = res;
2765        size_t i;
2766
2767        /* call extra callback if requested */
2768        if (rxfer->release)
2769                rxfer->release(ctlr, msg, res);
2770
2771        /* insert replaced transfers back into the message */
2772        list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2773
2774        /* remove the formerly inserted entries */
2775        for (i = 0; i < rxfer->inserted; i++)
2776                list_del(&rxfer->inserted_transfers[i].transfer_list);
2777}
2778
2779/**
2780 * spi_replace_transfers - replace transfers with several transfers
2781 *                         and register change with spi_message.resources
2782 * @msg:           the spi_message we work upon
2783 * @xfer_first:    the first spi_transfer we want to replace
2784 * @remove:        number of transfers to remove
2785 * @insert:        the number of transfers we want to insert instead
2786 * @release:       extra release code necessary in some circumstances
2787 * @extradatasize: extra data to allocate (with alignment guarantees
2788 *                 of struct @spi_transfer)
2789 * @gfp:           gfp flags
2790 *
2791 * Returns: pointer to @spi_replaced_transfers,
2792 *          PTR_ERR(...) in case of errors.
2793 */
2794struct spi_replaced_transfers *spi_replace_transfers(
2795        struct spi_message *msg,
2796        struct spi_transfer *xfer_first,
2797        size_t remove,
2798        size_t insert,
2799        spi_replaced_release_t release,
2800        size_t extradatasize,
2801        gfp_t gfp)
2802{
2803        struct spi_replaced_transfers *rxfer;
2804        struct spi_transfer *xfer;
2805        size_t i;
2806
2807        /* allocate the structure using spi_res */
2808        rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2809                              struct_size(rxfer, inserted_transfers, insert)
2810                              + extradatasize,
2811                              gfp);
2812        if (!rxfer)
2813                return ERR_PTR(-ENOMEM);
2814
2815        /* the release code to invoke before running the generic release */
2816        rxfer->release = release;
2817
2818        /* assign extradata */
2819        if (extradatasize)
2820                rxfer->extradata =
2821                        &rxfer->inserted_transfers[insert];
2822
2823        /* init the replaced_transfers list */
2824        INIT_LIST_HEAD(&rxfer->replaced_transfers);
2825
2826        /* assign the list_entry after which we should reinsert
2827         * the @replaced_transfers - it may be spi_message.transfers!
2828         */
2829        rxfer->replaced_after = xfer_first->transfer_list.prev;
2830
2831        /* remove the requested number of transfers */
2832        for (i = 0; i < remove; i++) {
2833                /* if the entry after replaced_after is &msg->transfers
2834                 * then we have been requested to remove more transfers
2835                 * than are in the list
2836                 */
2837                if (rxfer->replaced_after->next == &msg->transfers) {
2838                        dev_err(&msg->spi->dev,
2839                                "requested to remove more spi_transfers than are available\n");
2840                        /* insert replaced transfers back into the message */
2841                        list_splice(&rxfer->replaced_transfers,
2842                                    rxfer->replaced_after);
2843
2844                        /* free the spi_replace_transfer structure */
2845                        spi_res_free(rxfer);
2846
2847                        /* and return with an error */
2848                        return ERR_PTR(-EINVAL);
2849                }
2850
2851                /* remove the entry after replaced_after from list of
2852                 * transfers and add it to list of replaced_transfers
2853                 */
2854                list_move_tail(rxfer->replaced_after->next,
2855                               &rxfer->replaced_transfers);
2856        }
2857
2858        /* create copies of the given xfer with identical settings
2859         * based on the first transfer to get removed
2860         */
2861        for (i = 0; i < insert; i++) {
2862                /* we need to run in reverse order */
2863                xfer = &rxfer->inserted_transfers[insert - 1 - i];
2864
2865                /* copy all spi_transfer data */
2866                memcpy(xfer, xfer_first, sizeof(*xfer));
2867
2868                /* add to list */
2869                list_add(&xfer->transfer_list, rxfer->replaced_after);
2870
2871                /* clear cs_change and delay_usecs for all but the last */
2872                if (i) {
2873                        xfer->cs_change = false;
2874                        xfer->delay_usecs = 0;
2875                }
2876        }
2877
2878        /* set up inserted */
2879        rxfer->inserted = insert;
2880
2881        /* and register it with spi_res/spi_message */
2882        spi_res_add(msg, rxfer);
2883
2884        return rxfer;
2885}
2886EXPORT_SYMBOL_GPL(spi_replace_transfers);
2887
2888static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
2889                                        struct spi_message *msg,
2890                                        struct spi_transfer **xferp,
2891                                        size_t maxsize,
2892                                        gfp_t gfp)
2893{
2894        struct spi_transfer *xfer = *xferp, *xfers;
2895        struct spi_replaced_transfers *srt;
2896        size_t offset;
2897        size_t count, i;
2898
2899        /* calculate how many we have to replace */
2900        count = DIV_ROUND_UP(xfer->len, maxsize);
2901
2902        /* create replacement */
2903        srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2904        if (IS_ERR(srt))
2905                return PTR_ERR(srt);
2906        xfers = srt->inserted_transfers;
2907
2908        /* now handle each of those newly inserted spi_transfers
2909         * note that the replacement spi_transfers are all preset
2910         * to the same values as *xferp, so tx_buf, rx_buf and len
2911         * are all identical (as well as most others)
2912         * so we just have to fix up len and the pointers.
2913         *
2914         * this also includes support for the deprecated
2915         * spi_message.is_dma_mapped interface
2916         */
2917
2918        /* the first transfer just needs the length modified, so we
2919         * run it outside the loop
2920         */
2921        xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2922
2923        /* all the others need rx_buf/tx_buf also set */
2924        for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2925                /* update rx_buf, tx_buf and dma */
2926                if (xfers[i].rx_buf)
2927                        xfers[i].rx_buf += offset;
2928                if (xfers[i].rx_dma)
2929                        xfers[i].rx_dma += offset;
2930                if (xfers[i].tx_buf)
2931                        xfers[i].tx_buf += offset;
2932                if (xfers[i].tx_dma)
2933                        xfers[i].tx_dma += offset;
2934
2935                /* update length */
2936                xfers[i].len = min(maxsize, xfers[i].len - offset);
2937        }
2938
2939        /* we set up xferp to the last entry we have inserted,
2940         * so that we skip those already split transfers
2941         */
2942        *xferp = &xfers[count - 1];
2943
2944        /* increment statistics counters */
2945        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2946                                       transfers_split_maxsize);
2947        SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2948                                       transfers_split_maxsize);
2949
2950        return 0;
2951}
2952
2953/**
2954 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2955 *                               when an individual transfer exceeds a
2956 *                               certain size
2957 * @ctlr: the @spi_controller for this transfer
2958 * @msg: the @spi_message to transform
2959 * @maxsize: the maximum size of a transfer before it is split
2960 * @gfp: GFP allocation flags
2961 *
2962 * Return: status of transformation
2963 */
2964int spi_split_transfers_maxsize(struct spi_controller *ctlr,
2965                                struct spi_message *msg,
2966                                size_t maxsize,
2967                                gfp_t gfp)
2968{
2969        struct spi_transfer *xfer;
2970        int ret;
2971
2972        /* iterate over the transfer_list,
2973         * but note that xfer is advanced to the last transfer inserted
2974         * to avoid checking sizes again unnecessarily (also, xfer may
2975         * belong to a different list by the time the replacement has
2976         * happened)
2977         */
2978        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2979                if (xfer->len > maxsize) {
2980                        ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
2981                                                           maxsize, gfp);
2982                        if (ret)
2983                                return ret;
2984                }
2985        }
2986
2987        return 0;
2988}
2989EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
2990
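/*
 * Illustrative sketch only (hypothetical, not used anywhere in this file):
 * a controller driver whose hardware cannot move more than 256 bytes per
 * transfer might split oversized transfers from its prepare_message()
 * callback roughly like this.  The 256-byte limit and the function name
 * are assumptions made up for the example.
 */
static int __maybe_unused example_prepare_message(struct spi_controller *ctlr,
                                                  struct spi_message *msg)
{
        /* split every transfer larger than the assumed 256-byte FIFO */
        return spi_split_transfers_maxsize(ctlr, msg, 256, GFP_KERNEL);
}
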
2991/*-------------------------------------------------------------------------*/
2992
2993/* Core methods for SPI controller protocol drivers.  Some of the
2994 * other core methods are currently defined as inline functions.
2995 */
2996
2997static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2998                                        u8 bits_per_word)
2999{
3000        if (ctlr->bits_per_word_mask) {
3001                /* Only 32 bits fit in the mask */
3002                if (bits_per_word > 32)
3003                        return -EINVAL;
3004                if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3005                        return -EINVAL;
3006        }
3007
3008        return 0;
3009}
3010
3011/**
3012 * spi_setup - setup SPI mode and clock rate
3013 * @spi: the device whose settings are being modified
3014 * Context: can sleep, and no requests are queued to the device
3015 *
3016 * SPI protocol drivers may need to update the transfer mode if the
3017 * device doesn't work with its default.  They may likewise need
3018 * to update clock rates or word sizes from initial values.  This function
3019 * changes those settings, and must be called from a context that can sleep.
3020 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3021 * effect the next time the device is selected and data is transferred to
3022 * or from it.  When this function returns, the spi device is deselected.
3023 *
3024 * Note that this call will fail if the protocol driver specifies an option
3025 * that the underlying controller or its driver does not support.  For
3026 * example, not all hardware supports wire transfers using nine bit words,
3027 * LSB-first wire encoding, or active-high chipselects.
3028 *
3029 * Return: zero on success, else a negative error code.
3030 */
3031int spi_setup(struct spi_device *spi)
3032{
3033        unsigned        bad_bits, ugly_bits;
3034        int             status;
3035
3036        /* check mode to prevent DUAL and QUAD from being set at the same time
3037         */
3038        if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
3039                ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
3040                dev_err(&spi->dev,
3041                "setup: can not select dual and quad at the same time\n");
3042                return -EINVAL;
3043        }
3044        /* in SPI_3WIRE mode, DUAL, QUAD and OCTAL should be forbidden
3045         */
3046        if ((spi->mode & SPI_3WIRE) && (spi->mode &
3047                (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3048                 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3049                return -EINVAL;
3050        /* help drivers fail *cleanly* when they need options
3051         * that aren't supported with their current controller.
3052         * SPI_CS_WORD has a fallback software implementation,
3053         * so it is ignored here.
3054         */
3055        bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
3056        /* nothing prevents us from working with an active-high CS if it
3057         * is driven by a GPIO.
3058         */
3059        if (gpio_is_valid(spi->cs_gpio))
3060                bad_bits &= ~SPI_CS_HIGH;
3061        ugly_bits = bad_bits &
3062                    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3063                     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3064        if (ugly_bits) {
3065                dev_warn(&spi->dev,
3066                         "setup: ignoring unsupported mode bits %x\n",
3067                         ugly_bits);
3068                spi->mode &= ~ugly_bits;
3069                bad_bits &= ~ugly_bits;
3070        }
3071        if (bad_bits) {
3072                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3073                        bad_bits);
3074                return -EINVAL;
3075        }
3076
3077        if (!spi->bits_per_word)
3078                spi->bits_per_word = 8;
3079
3080        status = __spi_validate_bits_per_word(spi->controller,
3081                                              spi->bits_per_word);
3082        if (status)
3083                return status;
3084
3085        if (!spi->max_speed_hz)
3086                spi->max_speed_hz = spi->controller->max_speed_hz;
3087
3088        if (spi->controller->setup)
3089                status = spi->controller->setup(spi);
3090
3091        spi_set_cs(spi, false);
3092
3093        if (spi->rt && !spi->controller->rt) {
3094                spi->controller->rt = true;
3095                spi_set_thread_rt(spi->controller);
3096        }
3097
3098        dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3099                        (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
3100                        (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3101                        (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3102                        (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3103                        (spi->mode & SPI_LOOP) ? "loopback, " : "",
3104                        spi->bits_per_word, spi->max_speed_hz,
3105                        status);
3106
3107        return status;
3108}
3109EXPORT_SYMBOL_GPL(spi_setup);
3110
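/*
 * Illustrative sketch only (hypothetical): a protocol driver's probe()
 * typically overrides the defaults it cares about and then calls
 * spi_setup().  The mode, word size and clock limit below are assumed
 * example values, not requirements of any real device.
 */
static int __maybe_unused example_protocol_probe(struct spi_device *spi)
{
        spi->mode = SPI_MODE_3;                 /* CPOL=1, CPHA=1 */
        spi->bits_per_word = 16;                /* assumed word size */
        spi->max_speed_hz = 1000000;            /* assumed 1 MHz limit */

        /* fails cleanly if the controller cannot honour these options */
        return spi_setup(spi);
}
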
3111/**
3112 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3113 * @spi: the device that requires specific CS timing configuration
3114 * @setup: CS setup time in terms of clock count
3115 * @hold: CS hold time in terms of clock count
3116 * @inactive_dly: CS inactive delay between transfers in terms of clock count
3117 */
3118void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
3119                       u8 inactive_dly)
3120{
3121        if (spi->controller->set_cs_timing)
3122                spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
3123}
3124EXPORT_SYMBOL_GPL(spi_set_cs_timing);
3125
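/*
 * Illustrative sketch only (hypothetical): a peripheral that needs extra
 * chip-select setup and hold time (two clock cycles each is an assumed
 * value) could request it like this.  Controllers without a
 * set_cs_timing() callback silently ignore the request.
 */
static void __maybe_unused example_request_cs_timing(struct spi_device *spi)
{
        spi_set_cs_timing(spi, 2, 2, 0);        /* setup, hold, inactive */
}
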
3126static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3127{
3128        struct spi_controller *ctlr = spi->controller;
3129        struct spi_transfer *xfer;
3130        int w_size;
3131
3132        if (list_empty(&message->transfers))
3133                return -EINVAL;
3134
3135        /* If an SPI controller does not support toggling the CS line on each
3136         * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3137         * for the CS line, we can emulate the CS-per-word hardware function by
3138         * splitting transfers into one-word transfers and ensuring that
3139         * cs_change is set for each transfer.
3140         */
3141        if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3142                                          spi->cs_gpiod ||
3143                                          gpio_is_valid(spi->cs_gpio))) {
3144                size_t maxsize;
3145                int ret;
3146
3147                maxsize = (spi->bits_per_word + 7) / 8;
3148
3149                /* spi_split_transfers_maxsize() requires message->spi */
3150                message->spi = spi;
3151
3152                ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3153                                                  GFP_KERNEL);
3154                if (ret)
3155                        return ret;
3156
3157                list_for_each_entry(xfer, &message->transfers, transfer_list) {
3158                        /* don't change cs_change on the last entry in the list */
3159                        if (list_is_last(&xfer->transfer_list, &message->transfers))
3160                                break;
3161                        xfer->cs_change = 1;
3162                }
3163        }
3164
3165        /* Half-duplex links include original MicroWire, and ones with
3166         * only one data pin like SPI_3WIRE (switches direction) or where
3167         * either MOSI or MISO is missing.  They can also be caused by
3168         * software limitations.
3169         */
3170        if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3171            (spi->mode & SPI_3WIRE)) {
3172                unsigned flags = ctlr->flags;
3173
3174                list_for_each_entry(xfer, &message->transfers, transfer_list) {
3175                        if (xfer->rx_buf && xfer->tx_buf)
3176                                return -EINVAL;
3177                        if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3178                                return -EINVAL;
3179                        if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3180                                return -EINVAL;
3181                }
3182        }
3183
3184        /*
3185         * Set transfer bits_per_word and max speed to the spi device
3186         * defaults if they are not set for this transfer.
3187         * Set transfer tx_nbits and rx_nbits to the single-transfer
3188         * default (SPI_NBITS_SINGLE) if they are not set for this transfer.
3189         * Ensure transfer word_delay is at least as long as that required
3190         * by the device itself.
3191         */
3192        message->frame_length = 0;
3193        list_for_each_entry(xfer, &message->transfers, transfer_list) {
3194                xfer->effective_speed_hz = 0;
3195                message->frame_length += xfer->len;
3196                if (!xfer->bits_per_word)
3197                        xfer->bits_per_word = spi->bits_per_word;
3198
3199                if (!xfer->speed_hz)
3200                        xfer->speed_hz = spi->max_speed_hz;
3201
3202                if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3203                        xfer->speed_hz = ctlr->max_speed_hz;
3204
3205                if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3206                        return -EINVAL;
3207
3208                /*
3209                 * SPI transfer length should be multiple of SPI word size
3210                 * where SPI word size should be power-of-two multiple
3211                 */
3212                if (xfer->bits_per_word <= 8)
3213                        w_size = 1;
3214                else if (xfer->bits_per_word <= 16)
3215                        w_size = 2;
3216                else
3217                        w_size = 4;
3218
3219                /* No partial transfers accepted */
3220                if (xfer->len % w_size)
3221                        return -EINVAL;
3222
3223                if (xfer->speed_hz && ctlr->min_speed_hz &&
3224                    xfer->speed_hz < ctlr->min_speed_hz)
3225                        return -EINVAL;
3226
3227                if (xfer->tx_buf && !xfer->tx_nbits)
3228                        xfer->tx_nbits = SPI_NBITS_SINGLE;
3229                if (xfer->rx_buf && !xfer->rx_nbits)
3230                        xfer->rx_nbits = SPI_NBITS_SINGLE;
3231                /* check transfer tx/rx_nbits:
3232                 * 1. check the value matches one of single, dual or quad
3233                 * 2. check tx/rx_nbits match the mode in spi_device
3234                 */
3235                if (xfer->tx_buf) {
3236                        if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3237                                xfer->tx_nbits != SPI_NBITS_DUAL &&
3238                                xfer->tx_nbits != SPI_NBITS_QUAD)
3239                                return -EINVAL;
3240                        if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3241                                !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3242                                return -EINVAL;
3243                        if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3244                                !(spi->mode & SPI_TX_QUAD))
3245                                return -EINVAL;
3246                }
3247                /* check transfer rx_nbits */
3248                if (xfer->rx_buf) {
3249                        if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3250                                xfer->rx_nbits != SPI_NBITS_DUAL &&
3251                                xfer->rx_nbits != SPI_NBITS_QUAD)
3252                                return -EINVAL;
3253                        if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3254                                !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3255                                return -EINVAL;
3256                        if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3257                                !(spi->mode & SPI_RX_QUAD))
3258                                return -EINVAL;
3259                }
3260
3261                if (xfer->word_delay_usecs < spi->word_delay_usecs)
3262                        xfer->word_delay_usecs = spi->word_delay_usecs;
3263        }
3264
3265        message->status = -EINPROGRESS;
3266
3267        return 0;
3268}
3269
3270static int __spi_async(struct spi_device *spi, struct spi_message *message)
3271{
3272        struct spi_controller *ctlr = spi->controller;
3273
3274        /*
3275         * Some controllers do not support doing regular SPI transfers. Return
3276         * ENOTSUPP when this is the case.
3277         */
3278        if (!ctlr->transfer)
3279                return -ENOTSUPP;
3280
3281        message->spi = spi;
3282
3283        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3284        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3285
3286        trace_spi_message_submit(message);
3287
3288        return ctlr->transfer(spi, message);
3289}
3290
3291/**
3292 * spi_async - asynchronous SPI transfer
3293 * @spi: device with which data will be exchanged
3294 * @message: describes the data transfers, including completion callback
3295 * Context: any (irqs may be blocked, etc)
3296 *
3297 * This call may be used in_irq and other contexts which can't sleep,
3298 * as well as from task contexts which can sleep.
3299 *
3300 * The completion callback is invoked in a context which can't sleep.
3301 * Before that invocation, the value of message->status is undefined.
3302 * When the callback is issued, message->status holds either zero (to
3303 * indicate complete success) or a negative error code.  After that
3304 * callback returns, the driver which issued the transfer request may
3305 * deallocate the associated memory; it's no longer in use by any SPI
3306 * core or controller driver code.
3307 *
3308 * Note that although all messages to a spi_device are handled in
3309 * FIFO order, messages may go to different devices in other orders.
3310 * Some device might be higher priority, or have various "hard" access
3311 * time requirements, for example.
3312 *
3313 * On detection of any fault during the transfer, processing of
3314 * the entire message is aborted, and the device is deselected.
3315 * Until returning from the associated message completion callback,
3316 * no other spi_message queued to that device will be processed.
3317 * (This rule applies equally to all the synchronous transfer calls,
3318 * which are wrappers around this core asynchronous primitive.)
3319 *
3320 * Return: zero on success, else a negative error code.
3321 */
3322int spi_async(struct spi_device *spi, struct spi_message *message)
3323{
3324        struct spi_controller *ctlr = spi->controller;
3325        int ret;
3326        unsigned long flags;
3327
3328        ret = __spi_validate(spi, message);
3329        if (ret != 0)
3330                return ret;
3331
3332        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3333
3334        if (ctlr->bus_lock_flag)
3335                ret = -EBUSY;
3336        else
3337                ret = __spi_async(spi, message);
3338
3339        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3340
3341        return ret;
3342}
3343EXPORT_SYMBOL_GPL(spi_async);
3344
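/*
 * Illustrative sketch only (hypothetical): submitting a message from a
 * context that cannot sleep.  The completion callback runs in a context
 * that cannot sleep either, so it only signals a completion here.  The
 * function names and the passed-in completion are assumptions made up
 * for the example; msg and its transfers must stay allocated until the
 * callback has run.
 */
static void __maybe_unused example_message_done(void *context)
{
        complete(context);              /* wake up whoever is waiting */
}

static int __maybe_unused example_submit_async(struct spi_device *spi,
                                               struct spi_message *msg,
                                               struct completion *done)
{
        msg->complete = example_message_done;
        msg->context = done;

        return spi_async(spi, msg);     /* may be called with irqs blocked */
}
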
3345/**
3346 * spi_async_locked - version of spi_async with exclusive bus usage
3347 * @spi: device with which data will be exchanged
3348 * @message: describes the data transfers, including completion callback
3349 * Context: any (irqs may be blocked, etc)
3350 *
3351 * This call may be used in_irq and other contexts which can't sleep,
3352 * as well as from task contexts which can sleep.
3353 *
3354 * The completion callback is invoked in a context which can't sleep.
3355 * Before that invocation, the value of message->status is undefined.
3356 * When the callback is issued, message->status holds either zero (to
3357 * indicate complete success) or a negative error code.  After that
3358 * callback returns, the driver which issued the transfer request may
3359 * deallocate the associated memory; it's no longer in use by any SPI
3360 * core or controller driver code.
3361 *
3362 * Note that although all messages to a spi_device are handled in
3363 * FIFO order, messages may go to different devices in other orders.
3364 * Some device might be higher priority, or have various "hard" access
3365 * time requirements, for example.
3366 *
3367 * On detection of any fault during the transfer, processing of
3368 * the entire message is aborted, and the device is deselected.
3369 * Until returning from the associated message completion callback,
3370 * no other spi_message queued to that device will be processed.
3371 * (This rule applies equally to all the synchronous transfer calls,
3372 * which are wrappers around this core asynchronous primitive.)
3373 *
3374 * Return: zero on success, else a negative error code.
3375 */
3376int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3377{
3378        struct spi_controller *ctlr = spi->controller;
3379        int ret;
3380        unsigned long flags;
3381
3382        ret = __spi_validate(spi, message);
3383        if (ret != 0)
3384                return ret;
3385
3386        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3387
3388        ret = __spi_async(spi, message);
3389
3390        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3391
3392        return ret;
3393
3394}
3395EXPORT_SYMBOL_GPL(spi_async_locked);
3396
3397/*-------------------------------------------------------------------------*/
3398
3399/* Utility methods for SPI protocol drivers, layered on
3400 * top of the core.  Some other utility methods are defined as
3401 * inline functions.
3402 */
3403
3404static void spi_complete(void *arg)
3405{
3406        complete(arg);
3407}
3408
3409static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3410{
3411        DECLARE_COMPLETION_ONSTACK(done);
3412        int status;
3413        struct spi_controller *ctlr = spi->controller;
3414        unsigned long flags;
3415
3416        status = __spi_validate(spi, message);
3417        if (status != 0)
3418                return status;
3419
3420        message->complete = spi_complete;
3421        message->context = &done;
3422        message->spi = spi;
3423
3424        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3425        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3426
3427        /* If we're not using the legacy transfer method then we will
3428         * try to transfer in the calling context, so special-case that.
3429         * This code would be less tricky if we could remove the
3430         * support for driver-implemented message queues.
3431         */
3432        if (ctlr->transfer == spi_queued_transfer) {
3433                spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3434
3435                trace_spi_message_submit(message);
3436
3437                status = __spi_queued_transfer(spi, message, false);
3438
3439                spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3440        } else {
3441                status = spi_async_locked(spi, message);
3442        }
3443
3444        if (status == 0) {
3445                /* Push out the messages in the calling context if we
3446                 * can.
3447                 */
3448                if (ctlr->transfer == spi_queued_transfer) {
3449                        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3450                                                       spi_sync_immediate);
3451                        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3452                                                       spi_sync_immediate);
3453                        __spi_pump_messages(ctlr, false);
3454                }
3455
3456                wait_for_completion(&done);
3457                status = message->status;
3458        }
3459        message->context = NULL;
3460        return status;
3461}
3462
3463/**
3464 * spi_sync - blocking/synchronous SPI data transfers
3465 * @spi: device with which data will be exchanged
3466 * @message: describes the data transfers
3467 * Context: can sleep
3468 *
3469 * This call may only be used from a context that may sleep.  The sleep
3470 * is non-interruptible, and has no timeout.  Low-overhead controller
3471 * drivers may DMA directly into and out of the message buffers.
3472 *
3473 * Note that the SPI device's chip select is active during the message,
3474 * and then is normally disabled between messages.  Drivers for some
3475 * frequently-used devices may want to minimize costs of selecting a chip,
3476 * by leaving it selected in anticipation that the next message will go
3477 * to the same chip.  (That may increase power usage.)
3478 *
3479 * Also, the caller is guaranteeing that the memory associated with the
3480 * message will not be freed before this call returns.
3481 *
3482 * Return: zero on success, else a negative error code.
3483 */
3484int spi_sync(struct spi_device *spi, struct spi_message *message)
3485{
3486        int ret;
3487
3488        mutex_lock(&spi->controller->bus_lock_mutex);
3489        ret = __spi_sync(spi, message);
3490        mutex_unlock(&spi->controller->bus_lock_mutex);
3491
3492        return ret;
3493}
3494EXPORT_SYMBOL_GPL(spi_sync);
3495
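/*
 * Illustrative sketch only (hypothetical): a simple synchronous register
 * write built from a single transfer.  The two-byte command layout is an
 * assumption for the example; the buffer is taken from the heap because
 * buffers handed to spi_sync() may be used for DMA.
 */
static int __maybe_unused example_write_reg(struct spi_device *spi,
                                            u8 reg, u8 val)
{
        struct spi_transfer xfer = { .len = 2 };
        struct spi_message msg;
        u8 *cmd;
        int ret;

        cmd = kmalloc(2, GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        cmd[0] = reg;
        cmd[1] = val;
        xfer.tx_buf = cmd;

        spi_message_init_with_transfers(&msg, &xfer, 1);
        ret = spi_sync(spi, &msg);      /* sleeps until the message is done */

        kfree(cmd);
        return ret;
}
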
3496/**
3497 * spi_sync_locked - version of spi_sync with exclusive bus usage
3498 * @spi: device with which data will be exchanged
3499 * @message: describes the data transfers
3500 * Context: can sleep
3501 *
3502 * This call may only be used from a context that may sleep.  The sleep
3503 * is non-interruptible, and has no timeout.  Low-overhead controller
3504 * drivers may DMA directly into and out of the message buffers.
3505 *
3506 * This call should be used by drivers that require exclusive access to the
3507 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3508 * be released by a spi_bus_unlock call when the exclusive access is over.
3509 *
3510 * Return: zero on success, else a negative error code.
3511 */
3512int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3513{
3514        return __spi_sync(spi, message);
3515}
3516EXPORT_SYMBOL_GPL(spi_sync_locked);
3517
3518/**
3519 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3520 * @ctlr: SPI bus master that should be locked for exclusive bus access
3521 * Context: can sleep
3522 *
3523 * This call may only be used from a context that may sleep.  The sleep
3524 * is non-interruptible, and has no timeout.
3525 *
3526 * This call should be used by drivers that require exclusive access to the
3527 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3528 * exclusive access is over. Data transfer must be done by spi_sync_locked
3529 * and spi_async_locked calls when the SPI bus lock is held.
3530 *
3531 * Return: always zero.
3532 */
3533int spi_bus_lock(struct spi_controller *ctlr)
3534{
3535        unsigned long flags;
3536
3537        mutex_lock(&ctlr->bus_lock_mutex);
3538
3539        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3540        ctlr->bus_lock_flag = 1;
3541        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3542
3543        /* mutex remains locked until spi_bus_unlock is called */
3544
3545        return 0;
3546}
3547EXPORT_SYMBOL_GPL(spi_bus_lock);
3548
3549/**
3550 * spi_bus_unlock - release the lock for exclusive SPI bus usage
3551 * @ctlr: SPI bus master that was locked for exclusive bus access
3552 * Context: can sleep
3553 *
3554 * This call may only be used from a context that may sleep.  The sleep
3555 * is non-interruptible, and has no timeout.
3556 *
3557 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
3558 * call.
3559 *
3560 * Return: always zero.
3561 */
3562int spi_bus_unlock(struct spi_controller *ctlr)
3563{
3564        ctlr->bus_lock_flag = 0;
3565
3566        mutex_unlock(&ctlr->bus_lock_mutex);
3567
3568        return 0;
3569}
3570EXPORT_SYMBOL_GPL(spi_bus_unlock);
3571
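/*
 * Illustrative sketch only (hypothetical): running two messages that must
 * not be interleaved with traffic to other devices on the same bus.  Both
 * messages are assumed to have been fully prepared by the caller.
 */
static int __maybe_unused example_atomic_sequence(struct spi_device *spi,
                                                  struct spi_message *first,
                                                  struct spi_message *second)
{
        struct spi_controller *ctlr = spi->controller;
        int ret;

        spi_bus_lock(ctlr);     /* other users now block in spi_sync() */

        ret = spi_sync_locked(spi, first);
        if (!ret)
                ret = spi_sync_locked(spi, second);

        spi_bus_unlock(ctlr);   /* release the bus for everyone else */

        return ret;
}
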
3572/* portable code must never pass more than 32 bytes */
3573#define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
3574
3575static u8       *buf;
3576
3577/**
3578 * spi_write_then_read - SPI synchronous write followed by read
3579 * @spi: device with which data will be exchanged
3580 * @txbuf: data to be written (need not be dma-safe)
3581 * @n_tx: size of txbuf, in bytes
3582 * @rxbuf: buffer into which data will be read (need not be dma-safe)
3583 * @n_rx: size of rxbuf, in bytes
3584 * Context: can sleep
3585 *
3586 * This performs a half duplex MicroWire style transaction with the
3587 * device, sending txbuf and then reading rxbuf.  The return value
3588 * is zero for success, else a negative errno status code.
3589 * This call may only be used from a context that may sleep.
3590 *
3591 * Parameters to this routine are always copied using a small buffer;
3592 * portable code should never use this for more than 32 bytes.
3593 * Performance-sensitive or bulk transfer code should instead use
3594 * spi_{async,sync}() calls with dma-safe buffers.
3595 *
3596 * Return: zero on success, else a negative error code.
3597 */
3598int spi_write_then_read(struct spi_device *spi,
3599                const void *txbuf, unsigned n_tx,
3600                void *rxbuf, unsigned n_rx)
3601{
3602        static DEFINE_MUTEX(lock);
3603
3604        int                     status;
3605        struct spi_message      message;
3606        struct spi_transfer     x[2];
3607        u8                      *local_buf;
3608
3609        /* Use preallocated DMA-safe buffer if we can.  We can't avoid
3610         * copying here (it is purely a convenience), but we can
3611         * keep heap costs out of the hot path unless someone else is
3612         * using the pre-allocated buffer or the transfer is too large.
3613         */
3614        if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3615                local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3616                                    GFP_KERNEL | GFP_DMA);
3617                if (!local_buf)
3618                        return -ENOMEM;
3619        } else {
3620                local_buf = buf;
3621        }
3622
3623        spi_message_init(&message);
3624        memset(x, 0, sizeof(x));
3625        if (n_tx) {
3626                x[0].len = n_tx;
3627                spi_message_add_tail(&x[0], &message);
3628        }
3629        if (n_rx) {
3630                x[1].len = n_rx;
3631                spi_message_add_tail(&x[1], &message);
3632        }
3633
3634        memcpy(local_buf, txbuf, n_tx);
3635        x[0].tx_buf = local_buf;
3636        x[1].rx_buf = local_buf + n_tx;
3637
3638        /* do the i/o */
3639        status = spi_sync(spi, &message);
3640        if (status == 0)
3641                memcpy(rxbuf, x[1].rx_buf, n_rx);
3642
3643        if (x[0].tx_buf == buf)
3644                mutex_unlock(&lock);
3645        else
3646                kfree(local_buf);
3647
3648        return status;
3649}
3650EXPORT_SYMBOL_GPL(spi_write_then_read);
3651
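/*
 * Illustrative sketch only (hypothetical): reading a small ID register
 * with the convenience helper.  The 0x9f opcode and two-byte reply are
 * assumptions; the buffers may live on the stack because the helper
 * copies them through its own DMA-safe bounce buffer.
 */
static int __maybe_unused example_read_id(struct spi_device *spi, u16 *id)
{
        u8 cmd = 0x9f;                  /* assumed "read ID" opcode */
        u8 rx[2];
        int ret;

        ret = spi_write_then_read(spi, &cmd, 1, rx, sizeof(rx));
        if (ret)
                return ret;

        *id = (rx[0] << 8) | rx[1];
        return 0;
}
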
3652/*-------------------------------------------------------------------------*/
3653
3654#if IS_ENABLED(CONFIG_OF)
3655static int __spi_of_device_match(struct device *dev, const void *data)
3656{
3657        return dev->of_node == data;
3658}
3659
3660/* the caller must call put_device() when done with the returned spi_device */
3661struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3662{
3663        struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3664                                                __spi_of_device_match);
3665        return dev ? to_spi_device(dev) : NULL;
3666}
3667EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
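
/*
 * Illustrative sketch only (hypothetical): looking up the spi_device
 * behind a device tree node.  The reference taken by the lookup must be
 * dropped with put_device() once the caller is done with the device.
 */
static void __maybe_unused example_use_node(struct device_node *node)
{
        struct spi_device *spi = of_find_spi_device_by_node(node);

        if (!spi)
                return;

        dev_info(&spi->dev, "found SPI device for %pOF\n", node);
        put_device(&spi->dev);          /* drop the lookup reference */
}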
3668#endif /* IS_ENABLED(CONFIG_OF) */
3669
3670#if IS_ENABLED(CONFIG_OF_DYNAMIC)
3671static int __spi_of_controller_match(struct device *dev, const void *data)
3672{
3673        return dev->of_node == data;
3674}
3675
3676/* SPI controllers are not on spi_bus, so we must find them another way */
3677static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3678{
3679        struct device *dev;
3680
3681        dev = class_find_device(&spi_master_class, NULL, node,
3682                                __spi_of_controller_match);
3683        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3684                dev = class_find_device(&spi_slave_class, NULL, node,
3685                                        __spi_of_controller_match);
3686        if (!dev)
3687                return NULL;
3688
3689        /* reference taken in class_find_device */
3690        return container_of(dev, struct spi_controller, dev);
3691}
3692
3693static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3694                         void *arg)
3695{
3696        struct of_reconfig_data *rd = arg;
3697        struct spi_controller *ctlr;
3698        struct spi_device *spi;
3699
3700        switch (of_reconfig_get_state_change(action, arg)) {
3701        case OF_RECONFIG_CHANGE_ADD:
3702                ctlr = of_find_spi_controller_by_node(rd->dn->parent);
3703                if (ctlr == NULL)
3704                        return NOTIFY_OK;       /* not for us */
3705
3706                if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3707                        put_device(&ctlr->dev);
3708                        return NOTIFY_OK;
3709                }
3710
3711                spi = of_register_spi_device(ctlr, rd->dn);
3712                put_device(&ctlr->dev);
3713
3714                if (IS_ERR(spi)) {
3715                        pr_err("%s: failed to create for '%pOF'\n",
3716                                        __func__, rd->dn);
3717                        of_node_clear_flag(rd->dn, OF_POPULATED);
3718                        return notifier_from_errno(PTR_ERR(spi));
3719                }
3720                break;
3721
3722        case OF_RECONFIG_CHANGE_REMOVE:
3723                /* already depopulated? */
3724                if (!of_node_check_flag(rd->dn, OF_POPULATED))
3725                        return NOTIFY_OK;
3726
3727                /* find our device by node */
3728                spi = of_find_spi_device_by_node(rd->dn);
3729                if (spi == NULL)
3730                        return NOTIFY_OK;       /* no? not meant for us */
3731
3732                /* unregister takes one ref away */
3733                spi_unregister_device(spi);
3734
3735                /* and drop the reference taken by the find */
3736                put_device(&spi->dev);
3737                break;
3738        }
3739
3740        return NOTIFY_OK;
3741}
3742
3743static struct notifier_block spi_of_notifier = {
3744        .notifier_call = of_spi_notify,
3745};
3746#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3747extern struct notifier_block spi_of_notifier;
3748#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3749
3750#if IS_ENABLED(CONFIG_ACPI)
3751static int spi_acpi_controller_match(struct device *dev, const void *data)
3752{
3753        return ACPI_COMPANION(dev->parent) == data;
3754}
3755
3756static int spi_acpi_device_match(struct device *dev, const void *data)
3757{
3758        return ACPI_COMPANION(dev) == data;
3759}
3760
3761static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3762{
3763        struct device *dev;
3764
3765        dev = class_find_device(&spi_master_class, NULL, adev,
3766                                spi_acpi_controller_match);
3767        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3768                dev = class_find_device(&spi_slave_class, NULL, adev,
3769                                        spi_acpi_controller_match);
3770        if (!dev)
3771                return NULL;
3772
3773        return container_of(dev, struct spi_controller, dev);
3774}
3775
3776static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3777{
3778        struct device *dev;
3779
3780        dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3781
3782        return dev ? to_spi_device(dev) : NULL;
3783}
3784
3785static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3786                           void *arg)
3787{
3788        struct acpi_device *adev = arg;
3789        struct spi_controller *ctlr;
3790        struct spi_device *spi;
3791
3792        switch (value) {
3793        case ACPI_RECONFIG_DEVICE_ADD:
3794                ctlr = acpi_spi_find_controller_by_adev(adev->parent);
3795                if (!ctlr)
3796                        break;
3797
3798                acpi_register_spi_device(ctlr, adev);
3799                put_device(&ctlr->dev);
3800                break;
3801        case ACPI_RECONFIG_DEVICE_REMOVE:
3802                if (!acpi_device_enumerated(adev))
3803                        break;
3804
3805                spi = acpi_spi_find_device_by_adev(adev);
3806                if (!spi)
3807                        break;
3808
3809                spi_unregister_device(spi);
3810                put_device(&spi->dev);
3811                break;
3812        }
3813
3814        return NOTIFY_OK;
3815}
3816
3817static struct notifier_block spi_acpi_notifier = {
3818        .notifier_call = acpi_spi_notify,
3819};
3820#else
3821extern struct notifier_block spi_acpi_notifier;
3822#endif
3823
3824static int __init spi_init(void)
3825{
3826        int     status;
3827
3828        buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3829        if (!buf) {
3830                status = -ENOMEM;
3831                goto err0;
3832        }
3833
3834        status = bus_register(&spi_bus_type);
3835        if (status < 0)
3836                goto err1;
3837
3838        status = class_register(&spi_master_class);
3839        if (status < 0)
3840                goto err2;
3841
3842        if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3843                status = class_register(&spi_slave_class);
3844                if (status < 0)
3845                        goto err3;
3846        }
3847
3848        if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3849                WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3850        if (IS_ENABLED(CONFIG_ACPI))
3851                WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3852
3853        return 0;
3854
3855err3:
3856        class_unregister(&spi_master_class);
3857err2:
3858        bus_unregister(&spi_bus_type);
3859err1:
3860        kfree(buf);
3861        buf = NULL;
3862err0:
3863        return status;
3864}
3865
3866/* board_info is normally registered in arch_initcall(),
3867 * but even essential drivers wait till later.
3868 *
3869 * REVISIT only boardinfo really needs static linking.  The rest (device and
3870 * driver registration) _could_ be dynamically linked (modular) ... costs
3871 * include needing to have boardinfo data structures be much more public.
3872 */
3873postcore_initcall(spi_init);
3874