linux/drivers/spi/spi.c
// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
        struct spi_device       *spi = to_spi_device(dev);

        spi_controller_put(spi->controller);
        kfree(spi->driver_override);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *a,
                                     const char *buf, size_t count)
{
        struct spi_device *spi = to_spi_device(dev);
        const char *end = memchr(buf, '\n', count);
        const size_t len = end ? end - buf : count;
        const char *driver_override, *old;

        /* We need to keep extra room for a newline when displaying value */
        if (len >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, len, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        device_lock(dev);
        old = spi->driver_override;
        if (len) {
                spi->driver_override = driver_override;
        } else {
                /* Empty string, disable driver override */
                spi->driver_override = NULL;
                kfree(driver_override);
        }
        device_unlock(dev);
        kfree(old);

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        ssize_t len;

        device_lock(dev);
        len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
        device_unlock(dev);
        return len;
}
static DEVICE_ATTR_RW(driver_override);

#define SPI_STATISTICS_ATTRS(field, file)                               \
static ssize_t spi_controller_##field##_show(struct device *dev,        \
                                             struct device_attribute *attr, \
                                             char *buf)                 \
{                                                                       \
        struct spi_controller *ctlr = container_of(dev,                 \
                                         struct spi_controller, dev);   \
        return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
}                                                                       \
static struct device_attribute dev_attr_spi_controller_##field = {      \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_controller_##field##_show,                          \
};                                                                      \
static ssize_t spi_device_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                        char *buf)                      \
{                                                                       \
        struct spi_device *spi = to_spi_device(dev);                    \
        return spi_statistics_##field##_show(&spi->statistics, buf);    \
}                                                                       \
static struct device_attribute dev_attr_spi_device_##field = {          \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_device_##field##_show,                              \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf)                  \
{                                                                       \
        unsigned long flags;                                            \
        ssize_t len;                                                    \
        spin_lock_irqsave(&stat->lock, flags);                          \
        len = sprintf(buf, format_string, stat->field);                 \
        spin_unlock_irqrestore(&stat->lock, flags);                     \
        return len;                                                     \
}                                                                       \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)                       \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
                                 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
                                 "transfer_bytes_histo_" number,        \
                                 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        &dev_attr_spi_device_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
        &dev_attr_spi_controller_messages.attr,
        &dev_attr_spi_controller_transfers.attr,
        &dev_attr_spi_controller_errors.attr,
        &dev_attr_spi_controller_timedout.attr,
        &dev_attr_spi_controller_spi_sync.attr,
        &dev_attr_spi_controller_spi_sync_immediate.attr,
        &dev_attr_spi_controller_spi_async.attr,
        &dev_attr_spi_controller_bytes.attr,
        &dev_attr_spi_controller_bytes_rx.attr,
        &dev_attr_spi_controller_bytes_tx.attr,
        &dev_attr_spi_controller_transfer_bytes_histo0.attr,
        &dev_attr_spi_controller_transfer_bytes_histo1.attr,
        &dev_attr_spi_controller_transfer_bytes_histo2.attr,
        &dev_attr_spi_controller_transfer_bytes_histo3.attr,
        &dev_attr_spi_controller_transfer_bytes_histo4.attr,
        &dev_attr_spi_controller_transfer_bytes_histo5.attr,
        &dev_attr_spi_controller_transfer_bytes_histo6.attr,
        &dev_attr_spi_controller_transfer_bytes_histo7.attr,
        &dev_attr_spi_controller_transfer_bytes_histo8.attr,
        &dev_attr_spi_controller_transfer_bytes_histo9.attr,
        &dev_attr_spi_controller_transfer_bytes_histo10.attr,
        &dev_attr_spi_controller_transfer_bytes_histo11.attr,
        &dev_attr_spi_controller_transfer_bytes_histo12.attr,
        &dev_attr_spi_controller_transfer_bytes_histo13.attr,
        &dev_attr_spi_controller_transfer_bytes_histo14.attr,
        &dev_attr_spi_controller_transfer_bytes_histo15.attr,
        &dev_attr_spi_controller_transfer_bytes_histo16.attr,
        &dev_attr_spi_controller_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_controller_statistics_group,
        NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                       struct spi_transfer *xfer,
                                       struct spi_controller *ctlr)
{
        unsigned long flags;
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

        if (l2len < 0)
                l2len = 0;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;
        stats->transfer_bytes_histo[l2len]++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != ctlr->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != ctlr->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
        while (id->name[0]) {
                if (!strcmp(sdev->modalias, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Check override first, and if set, only use the named driver */
        if (spi->driver_override)
                return strcmp(spi->driver_override, drv->name) == 0;

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        struct spi_device               *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret)
                return ret;

        if (sdrv->probe) {
                ret = sdrv->probe(spi);
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static void spi_remove(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);

        if (sdrv->remove) {
                int ret;

                ret = sdrv->remove(to_spi_device(dev));
                if (ret)
                        dev_warn(dev,
                                 "Failed to unbind driver (%pe), ignoring\n",
                                 ERR_PTR(ret));
        }

        dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
        if (dev->driver) {
                const struct spi_driver *sdrv = to_spi_driver(dev->driver);

                if (sdrv->shutdown)
                        sdrv->shutdown(to_spi_device(dev));
        }
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
        .probe          = spi_probe,
        .remove         = spi_remove,
        .shutdown       = spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;
        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
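
/*
 * Example (illustrative sketch, not part of this file): a minimal driver
 * registration using the module_spi_driver() helper.  "my_chip" and its
 * callbacks are hypothetical names.
 *
 *        static struct spi_driver my_chip_driver = {
 *                .driver = {
 *                        .name   = "my_chip",
 *                },
 *                .probe  = my_chip_probe,
 *                .remove = my_chip_remove,
 *        };
 *        module_spi_driver(my_chip_driver);
 */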

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process.  Also used to protect
 * objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  The driver can then fill the spi_device
 * with device parameters directly before calling spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
        struct spi_device       *spi;

        if (!spi_controller_get(ctlr))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_controller_put(ctlr);
                return NULL;
        }

        spi->master = spi->controller = ctlr;
        spi->dev.parent = &ctlr->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;
        spi->mode = ctlr->buswidth_override_bits;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->controller == new_spi->controller &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
        if (spi->controller->cleanup)
                spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status;

        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
                return status;
        }

        /* Controller may unregister concurrently */
        if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
            !device_is_registered(&ctlr->dev)) {
                return -ENODEV;
        }

        /* Descriptors take precedence */
        if (ctlr->cs_gpiods)
                spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
        else if (ctlr->cs_gpios)
                spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

        /* Drivers may modify this initial i/o setup, but will
         * normally rely on the device having been set up.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                                dev_name(&spi->dev), status);
                return status;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0) {
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
                spi_cleanup(spi);
        } else {
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
        }

        return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= ctlr->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
                        ctlr->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /* We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.  Lock against concurrent add() calls.
         */
        mutex_lock(&ctlr->add_lock);
        status = __spi_add_device(spi);
        mutex_unlock(&ctlr->add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
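
/*
 * Example (illustrative sketch): allocating, configuring, and adding a
 * device by hand, per the spi_alloc_device()/spi_add_device() kerneldoc
 * above.  The chip select, speed, and modalias values are hypothetical.
 *
 *        struct spi_device *spi;
 *
 *        spi = spi_alloc_device(ctlr);
 *        if (!spi)
 *                return -ENOMEM;
 *
 *        spi->chip_select = 0;
 *        spi->max_speed_hz = 1000000;
 *        strlcpy(spi->modalias, "my_chip", sizeof(spi->modalias));
 *
 *        if (spi_add_device(spi)) {
 *                spi_dev_put(spi);        // discard without adding
 *                return -ENODEV;
 *        }
 */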

static int spi_add_device_locked(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= ctlr->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
                        ctlr->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        WARN_ON(!mutex_is_locked(&ctlr->add_lock));
        return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
                                  struct spi_board_info *chip)
{
        struct spi_device       *proxy;
        int                     status;

        /* NOTE:  caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(ctlr);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        if (chip->swnode) {
                status = device_add_software_node(&proxy->dev, chip->swnode);
                if (status) {
                        dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
                                chip->modalias, status);
                        goto err_dev_put;
                }
        }

        status = spi_add_device(proxy);
        if (status < 0)
                goto err_dev_put;

        return proxy;

err_dev_put:
        device_remove_software_node(&proxy->dev);
        spi_dev_put(proxy);
        return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
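
/*
 * Example (illustrative sketch): an adapter driver instantiating a device
 * it learned about out-of-band, as the kerneldoc above describes.  The
 * descriptor contents are hypothetical.
 *
 *        static struct spi_board_info chip = {
 *                .modalias     = "my_chip",
 *                .max_speed_hz = 500000,
 *                .chip_select  = 1,
 *                .mode         = SPI_MODE_0,
 *        };
 *
 *        struct spi_device *spi = spi_new_device(ctlr, &chip);
 *        if (!spi)
 *                dev_err(&ctlr->dev, "cannot instantiate my_chip\n");
 */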

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node) {
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
                of_node_put(spi->dev.of_node);
        }
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_remove_software_node(&spi->dev);
        device_del(&spi->dev);
        spi_cleanup(spi);
        put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
                                              struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (ctlr->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(ctlr, bi);
        if (!dev)
                dev_err(ctlr->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return 0;

        bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_controller *ctlr;

                memcpy(&bi->board_info, info, sizeof(*info));

                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(ctlr, &spi_controller_list, list)
                        spi_match_controller_to_boardinfo(ctlr,
                                                          &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
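
/*
 * Example (illustrative sketch): board init code declaring a hard-wired
 * device table, as the kerneldoc above describes.  The device names and
 * bus numbers are hypothetical.
 *
 *        static struct spi_board_info board_spi_devices[] __initdata = {
 *                {
 *                        .modalias     = "my_flash",
 *                        .max_speed_hz = 25000000,
 *                        .bus_num      = 0,
 *                        .chip_select  = 0,
 *                },
 *        };
 *
 *        static int __init board_init(void)
 *        {
 *                return spi_register_board_info(board_spi_devices,
 *                                               ARRAY_SIZE(board_spi_devices));
 *        }
 *        arch_initcall(board_init);
 */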

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
        bool activate = enable;

        /*
         * Avoid calling into the driver (or doing delays) if the chip select
         * isn't actually changing from the last time this was called.
         */
        if (!force && (spi->controller->last_cs_enable == enable) &&
            (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
                return;

        trace_spi_set_cs(spi, activate);

        spi->controller->last_cs_enable = enable;
        spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) {
                if (activate)
                        spi_delay_exec(&spi->cs_setup, NULL);
                else
                        spi_delay_exec(&spi->cs_hold, NULL);
        }

        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
                if (!(spi->mode & SPI_NO_CS)) {
                        if (spi->cs_gpiod) {
                                /*
                                 * Historically ACPI has no means of expressing the GPIO
                                 * polarity, and thus the SPISerialBus() resource defines it
                                 * on a per-chip basis. In order to avoid a chain of
                                 * negations, the GPIO polarity is considered to be Active
                                 * High. Even for the cases when _DSD() is involved (in the
                                 * updated versions of ACPI) the GPIO CS polarity must be
                                 * defined Active High to avoid ambiguity. That's why we use
                                 * enable, which takes SPI_CS_HIGH into account.
                                 */
                                if (has_acpi_companion(&spi->dev))
                                        gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
                                else
                                        /* Polarity handled by GPIO library */
                                        gpiod_set_value_cansleep(spi->cs_gpiod, activate);
                        } else {
                                /*
                                 * Invert the enable line, as active low is
                                 * the default for SPI.
                                 */
                                gpio_set_value_cansleep(spi->cs_gpio, !enable);
                        }
                }
                /* Some SPI masters need both GPIO CS & slave_select */
                if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
                    spi->controller->set_cs)
                        spi->controller->set_cs(spi, !enable);
        } else if (spi->controller->set_cs) {
                spi->controller->set_cs(spi, !enable);
        }

        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) {
                if (!activate)
                        spi_delay_exec(&spi->cs_inactive, NULL);
        }
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
                struct sg_table *sgt, void *buf, size_t len,
                enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
        const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
                                (unsigned long)buf < (PKMAP_BASE +
                                        (LAST_PKMAP * PAGE_SIZE)));
#else
        const bool kmap_buf = false;
#endif
        int desc_len;
        int sgs;
        struct page *vm_page;
        struct scatterlist *sg;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf || kmap_buf) {
                desc_len = min_t(int, max_seg_size, PAGE_SIZE);
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else if (virt_addr_valid(buf)) {
                desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
                sgs = DIV_ROUND_UP(len, desc_len);
        } else {
                return -EINVAL;
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        sg = &sgt->sgl[0];
        for (i = 0; i < sgs; i++) {

                if (vmalloced_buf || kmap_buf) {
                        /*
                         * Next scatterlist entry size is the minimum between
                         * the desc_len and the remaining buffer length that
                         * fits in a page.
                         */
                        min = min_t(size_t, desc_len,
                                    min_t(size_t, len,
                                          PAGE_SIZE - offset_in_page(buf)));
                        if (vmalloced_buf)
                                vm_page = vmalloc_to_page(buf);
                        else
                                vm_page = kmap_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(sg, vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(sg, sg_buf, min);
                }

                buf += min;
                len -= min;
                sg = sg_next(sg);
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
                   struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else if (ctlr->dma_map_dev)
                tx_dev = ctlr->dma_map_dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else if (ctlr->dma_map_dev)
                rx_dev = ctlr->dma_map_dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        ctlr->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        ctlr->cur_msg_mapped = false;

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore tx_buf and rx_buf to their original NULL values
                 * if spi_map_msg() replaced them with the dummy buffers.
                 */
                if (xfer->tx_buf == ctlr->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == ctlr->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
                && !(msg->spi->mode & SPI_3WIRE)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(ctlr->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_tx = tmp;
                        memset(tmp, 0, max_tx);
                }

                if (max_rx) {
                        tmp = krealloc(ctlr->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->len)
                                        continue;
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = ctlr->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = ctlr->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
                             struct spi_message *msg,
                             struct spi_transfer *xfer)
{
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
        u32 speed_hz = xfer->speed_hz;
        unsigned long long ms;

        if (spi_controller_is_slave(ctlr)) {
                if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
                        dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
                        return -EINTR;
                }
        } else {
                if (!speed_hz)
                        speed_hz = 100000;

                /*
                 * For each byte we wait for 8 cycles of the SPI clock.
                 * Since speed is defined in Hz and we want milliseconds,
                 * use the respective multiplier, but before the division,
                 * otherwise we may get 0 for short transfers.
                 */
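                /*
                 * Worked example (illustrative): 100 bytes at 1 MHz gives
                 * 8 * 1000 * 100 / 1000000 = 0 ms after truncation; the
                 * doubling and 200 ms tolerance below then yield a 200 ms
                 * timeout, so short transfers never wait on a zero timeout.
                 */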
                ms = 8LL * MSEC_PER_SEC * xfer->len;
                do_div(ms, speed_hz);

                /*
                 * Double it and add 200 ms of tolerance; use the
                 * predefined maximum in case of overflow.
                 */
                ms += ms + 200;
                if (ms > UINT_MAX)
                        ms = UINT_MAX;

                ms = wait_for_completion_timeout(&ctlr->xfer_completion,
                                                 msecs_to_jiffies(ms));

                if (ms == 0) {
                        SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
                        SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
                        dev_err(&msg->spi->dev,
                                "SPI transfer timed out\n");
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
        if (!ns)
                return;
        if (ns <= NSEC_PER_USEC) {
                ndelay(ns);
        } else {
                u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

                if (us <= 10)
                        udelay(us);
                else
                        usleep_range(us, us + DIV_ROUND_UP(us, 10));
        }
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
        u32 delay = _delay->value;
        u32 unit = _delay->unit;
        u32 hz;

        if (!delay)
                return 0;

        switch (unit) {
        case SPI_DELAY_UNIT_USECS:
                delay *= NSEC_PER_USEC;
                break;
        case SPI_DELAY_UNIT_NSECS:
                /* Nothing to do here */
                break;
        case SPI_DELAY_UNIT_SCK:
                /* clock cycles need to be obtained from spi_transfer */
                if (!xfer)
                        return -EINVAL;
                /*
                 * If the effective speed is unknown, approximate it by
                 * underestimating with half of the requested hz.
                 */
                hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
                if (!hz)
                        return -EINVAL;

                /* Convert delay to nanoseconds */
                delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
                break;
        default:
                return -EINVAL;
        }

        return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
        int delay;

        might_sleep();

        if (!_delay)
                return -EINVAL;

        delay = spi_delay_to_ns(_delay, xfer);
        if (delay < 0)
                return delay;

        _spi_transfer_delay_ns(delay);

        return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
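
/*
 * Example (illustrative): converting and executing a delay.  With
 * value = 2 and unit = SPI_DELAY_UNIT_USECS, spi_delay_to_ns() returns
 * 2000; SPI_DELAY_UNIT_SCK would additionally need a spi_transfer to
 * derive the clock period from.
 *
 *        struct spi_delay d = {
 *                .value = 2,
 *                .unit  = SPI_DELAY_UNIT_USECS,
 *        };
 *
 *        spi_delay_exec(&d, NULL);        // delays for ~2 us
 */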

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
                                          struct spi_transfer *xfer)
{
        u32 default_delay_ns = 10 * NSEC_PER_USEC;
        u32 delay = xfer->cs_change_delay.value;
        u32 unit = xfer->cs_change_delay.unit;
        int ret;

        /* return early on "fast" mode - for everything but USECS */
        if (!delay) {
                if (unit == SPI_DELAY_UNIT_USECS)
                        _spi_transfer_delay_ns(default_delay_ns);
                return;
        }

        ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
        if (ret) {
                dev_err_once(&msg->spi->dev,
                             "Use of unsupported delay unit %i, using default of %luus\n",
                             unit, default_delay_ns / NSEC_PER_USEC);
                _spi_transfer_delay_ns(default_delay_ns);
        }
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
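
/*
 * Example (illustrative): a controller driver opts in to this default by
 * setting only the transfer_one() callback at probe time; "my_transfer_one"
 * is a hypothetical name.
 *
 *        ctlr->transfer_one = my_transfer_one;
 *
 * transfer_one() returns 0 when the transfer finished synchronously, a
 * positive value when it completes asynchronously (signalled later via
 * spi_finalize_current_transfer()), or a negative errno on failure.
 */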
static int spi_transfer_one_message(struct spi_controller *ctlr,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;

        spi_set_cs(msg->spi, true, false);

        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                spi_statistics_add_transfer_stats(statm, xfer, ctlr);
                spi_statistics_add_transfer_stats(stats, xfer, ctlr);

                if (!ctlr->ptp_sts_supported) {
                        xfer->ptp_sts_word_pre = 0;
                        ptp_read_system_prets(xfer->ptp_sts);
                }

                if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
                        reinit_completion(&ctlr->xfer_completion);

fallback_pio:
                        ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
                        if (ret < 0) {
                                if (ctlr->cur_msg_mapped &&
                                   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
                                        __spi_unmap_msg(ctlr, msg);
                                        ctlr->fallback = true;
                                        xfer->error &= ~SPI_TRANS_FAIL_NO_START;
                                        goto fallback_pio;
                                }

                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               errors);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               errors);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = spi_transfer_wait(ctlr, msg, xfer);
                                if (ret < 0)
                                        msg->status = ret;
                        }
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                if (!ctlr->ptp_sts_supported) {
                        ptp_read_system_postts(xfer->ptp_sts);
                        xfer->ptp_sts_word_post = xfer->len;
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                spi_transfer_delay_exec(xfer);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                spi_set_cs(msg->spi, false, false);
                                _spi_transfer_cs_change_delay(msg, xfer);
                                spi_set_cs(msg->spi, true, false);
                        }
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && ctlr->handle_err)
                ctlr->handle_err(ctlr, msg);

        spi_finalize_current_message(ctlr);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
        complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
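
/*
 * Example (illustrative sketch): an interrupt-driven controller driver
 * completing a transfer from its IRQ handler.  "my_spi_irq" and the
 * transfer_done() status check are hypothetical.
 *
 *        static irqreturn_t my_spi_irq(int irq, void *dev_id)
 *        {
 *                struct spi_controller *ctlr = dev_id;
 *
 *                if (!transfer_done(ctlr))        // hypothetical status check
 *                        return IRQ_NONE;
 *
 *                spi_finalize_current_transfer(ctlr);
 *                return IRQ_HANDLED;
 *        }
 */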

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
        if (ctlr->auto_runtime_pm) {
                pm_runtime_mark_last_busy(ctlr->dev.parent);
                pm_runtime_put_autosuspend(ctlr->dev.parent);
        }
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
        struct spi_transfer *xfer;
        struct spi_message *msg;
        bool was_busy = false;
        unsigned long flags;
        int ret;

        /* Lock queue */
        spin_lock_irqsave(&ctlr->queue_lock, flags);

        /* Make sure we are not already running a message */
        if (ctlr->cur_msg) {
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                return;
        }

        /* If another context is idling the device then defer */
        if (ctlr->idling) {
                kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                return;
        }

        /* Check if the queue is idle */
        if (list_empty(&ctlr->queue) || !ctlr->running) {
                if (!ctlr->busy) {
                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                        return;
                }

                /* Defer any non-atomic teardown to the thread */
                if (!in_kthread) {
                        if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
                            !ctlr->unprepare_transfer_hardware) {
                                spi_idle_runtime_pm(ctlr);
                                ctlr->busy = false;
                                trace_spi_controller_idle(ctlr);
                        } else {
                                kthread_queue_work(ctlr->kworker,
                                                   &ctlr->pump_messages);
                        }
                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                        return;
                }

                ctlr->busy = false;
                ctlr->idling = true;
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);

                kfree(ctlr->dummy_rx);
                ctlr->dummy_rx = NULL;
                kfree(ctlr->dummy_tx);
                ctlr->dummy_tx = NULL;
                if (ctlr->unprepare_transfer_hardware &&
                    ctlr->unprepare_transfer_hardware(ctlr))
                        dev_err(&ctlr->dev,
                                "failed to unprepare transfer hardware\n");
                spi_idle_runtime_pm(ctlr);
                trace_spi_controller_idle(ctlr);

                spin_lock_irqsave(&ctlr->queue_lock, flags);
                ctlr->idling = false;
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                return;
        }

        /* Extract head of queue */
        msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
        ctlr->cur_msg = msg;

        list_del_init(&msg->queue);
        if (ctlr->busy)
                was_busy = true;
        else
                ctlr->busy = true;
        spin_unlock_irqrestore(&ctlr->queue_lock, flags);

        mutex_lock(&ctlr->io_mutex);

        if (!was_busy && ctlr->auto_runtime_pm) {
                ret = pm_runtime_get_sync(ctlr->dev.parent);
                if (ret < 0) {
                        pm_runtime_put_noidle(ctlr->dev.parent);
                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
                                ret);
                        mutex_unlock(&ctlr->io_mutex);
                        return;
                }
        }

        if (!was_busy)
                trace_spi_controller_busy(ctlr);

        if (!was_busy && ctlr->prepare_transfer_hardware) {
                ret = ctlr->prepare_transfer_hardware(ctlr);
                if (ret) {
                        dev_err(&ctlr->dev,
                                "failed to prepare transfer hardware: %d\n",
1531                                ret);
1532
1533                        if (ctlr->auto_runtime_pm)
1534                                pm_runtime_put(ctlr->dev.parent);
1535
1536                        msg->status = ret;
1537                        spi_finalize_current_message(ctlr);
1538
1539                        mutex_unlock(&ctlr->io_mutex);
1540                        return;
1541                }
1542        }
1543
1544        trace_spi_message_start(msg);
1545
1546        if (ctlr->prepare_message) {
1547                ret = ctlr->prepare_message(ctlr, msg);
1548                if (ret) {
1549                        dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1550                                ret);
1551                        msg->status = ret;
1552                        spi_finalize_current_message(ctlr);
1553                        goto out;
1554                }
1555                ctlr->cur_msg_prepared = true;
1556        }
1557
1558        ret = spi_map_msg(ctlr, msg);
1559        if (ret) {
1560                msg->status = ret;
1561                spi_finalize_current_message(ctlr);
1562                goto out;
1563        }
1564
1565        if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1566                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1567                        xfer->ptp_sts_word_pre = 0;
1568                        ptp_read_system_prets(xfer->ptp_sts);
1569                }
1570        }
1571
1572        ret = ctlr->transfer_one_message(ctlr, msg);
1573        if (ret) {
1574                dev_err(&ctlr->dev,
1575                        "failed to transfer one message from queue\n");
1576                goto out;
1577        }
1578
1579out:
1580        mutex_unlock(&ctlr->io_mutex);
1581
1582        /* Prod the scheduler in case transfer_one() was busy waiting */
1583        if (!ret)
1584                cond_resched();
1585}
1586
1587/**
1588 * spi_pump_messages - kthread work function which processes spi message queue
1589 * @work: pointer to kthread work struct contained in the controller struct
1590 */
1591static void spi_pump_messages(struct kthread_work *work)
1592{
1593        struct spi_controller *ctlr =
1594                container_of(work, struct spi_controller, pump_messages);
1595
1596        __spi_pump_messages(ctlr, true);
1597}
1598
1599/**
1600 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
1601 *                          TX timestamp for the requested byte from the SPI
1602 *                          transfer. The frequency with which this function
1603 *                          must be called (once per word, once for the whole
1604 *                          transfer, once per batch of words etc) is arbitrary
1605 *                          as long as the @tx buffer offset is greater than or
1606 *                          equal to the requested byte at the time of the
1607 *                          call. The timestamp is only taken once, at the
1608 *                          first such call. It is assumed that the driver
1609 *                          advances its @tx buffer pointer monotonically.
1610 * @ctlr: Pointer to the spi_controller structure of the driver
1611 * @xfer: Pointer to the transfer being timestamped
1612 * @progress: How many words (not bytes) have been transferred so far
1613 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1614 *            transfer, for less jitter in time measurement. Only compatible
1615 *            with PIO drivers. If true, it must be followed up with
1616 *            spi_take_timestamp_post, or otherwise the system will crash.
1617 *            WARNING: for fully predictable results, the CPU frequency must
1618 *            also be under control (governor).
1619 */
1620void spi_take_timestamp_pre(struct spi_controller *ctlr,
1621                            struct spi_transfer *xfer,
1622                            size_t progress, bool irqs_off)
1623{
1624        if (!xfer->ptp_sts)
1625                return;
1626
1627        if (xfer->timestamped)
1628                return;
1629
1630        if (progress > xfer->ptp_sts_word_pre)
1631                return;
1632
1633        /* Capture the resolution of the timestamp */
1634        xfer->ptp_sts_word_pre = progress;
1635
1636        if (irqs_off) {
1637                local_irq_save(ctlr->irq_flags);
1638                preempt_disable();
1639        }
1640
1641        ptp_read_system_prets(xfer->ptp_sts);
1642}
1643EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1644
1645/**
1646 * spi_take_timestamp_post - helper for drivers to collect the end of the
1647 *                           TX timestamp for the requested byte from the SPI
1648 *                           transfer. Can be called with an arbitrary
1649 *                           frequency: only the first call where @tx exceeds
1650 *                           or is equal to the requested word will be
1651 *                           timestamped.
1652 * @ctlr: Pointer to the spi_controller structure of the driver
1653 * @xfer: Pointer to the transfer being timestamped
1654 * @progress: How many words (not bytes) have been transferred so far
1655 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1656 */
1657void spi_take_timestamp_post(struct spi_controller *ctlr,
1658                             struct spi_transfer *xfer,
1659                             size_t progress, bool irqs_off)
1660{
1661        if (!xfer->ptp_sts)
1662                return;
1663
1664        if (xfer->timestamped)
1665                return;
1666
1667        if (progress < xfer->ptp_sts_word_post)
1668                return;
1669
1670        ptp_read_system_postts(xfer->ptp_sts);
1671
1672        if (irqs_off) {
1673                local_irq_restore(ctlr->irq_flags);
1674                preempt_enable();
1675        }
1676
1677        /* Capture the resolution of the timestamp */
1678        xfer->ptp_sts_word_post = progress;
1679
1680        xfer->timestamped = true;
1681}
1682EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
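/*
 * Usage sketch: a hypothetical PIO transmit loop bracketing every word
 * with the two helpers above. Only the word requested through
 * xfer->ptp_sts_word_pre/post is actually stamped; the other calls return
 * early. foo_write_word() is illustrative only and assumes 8-bit words.
 *
 *	static void foo_spi_tx_pio(struct spi_controller *ctlr,
 *				   struct spi_transfer *xfer)
 *	{
 *		const u8 *tx = xfer->tx_buf;
 *		size_t i;
 *
 *		for (i = 0; i < xfer->len; i++) {
 *			spi_take_timestamp_pre(ctlr, xfer, i, false);
 *			foo_write_word(ctlr, tx[i]);
 *			spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *		}
 *	}
 */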
1683
1684/**
1685 * spi_set_thread_rt - set the controller to pump at realtime priority
1686 * @ctlr: controller to boost priority of
1687 *
1688 * This can be called because the controller requested realtime priority
1689 * (by setting the ->rt value before calling spi_register_controller()) or
1690 * because a device on the bus said that its transfers needed realtime
1691 * priority.
1692 *
1693 * NOTE: at the moment if any device on a bus says it needs realtime then
1694 * the thread will be at realtime priority for all transfers on that
1695 * controller.  If this eventually becomes a problem we may see if we can
1696 * find a way to boost the priority only temporarily during relevant
1697 * transfers.
1698 */
1699static void spi_set_thread_rt(struct spi_controller *ctlr)
1700{
1701        dev_info(&ctlr->dev,
1702                "will run message pump with realtime priority\n");
1703        sched_set_fifo(ctlr->kworker->task);
1704}
1705
1706static int spi_init_queue(struct spi_controller *ctlr)
1707{
1708        ctlr->running = false;
1709        ctlr->busy = false;
1710
1711        ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1712        if (IS_ERR(ctlr->kworker)) {
1713                dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1714                return PTR_ERR(ctlr->kworker);
1715        }
1716
1717        kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1718
1719        /*
1720         * Controller config will indicate if this controller should run the
1721         * message pump with high (realtime) priority to reduce the transfer
1722         * latency on the bus by minimising the delay between a transfer
1723         * request and the scheduling of the message pump thread. Without this
1724         * setting the message pump thread will remain at default priority.
1725         */
1726        if (ctlr->rt)
1727                spi_set_thread_rt(ctlr);
1728
1729        return 0;
1730}
1731
1732/**
1733 * spi_get_next_queued_message() - called by driver to check for queued
1734 * messages
1735 * @ctlr: the controller to check for queued messages
1736 *
1737 * If there are more messages in the queue, the next message is returned from
1738 * this call.
1739 *
1740 * Return: the next message in the queue, else NULL if the queue is empty.
1741 */
1742struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1743{
1744        struct spi_message *next;
1745        unsigned long flags;
1746
1747        /* get a pointer to the next message, if any */
1748        spin_lock_irqsave(&ctlr->queue_lock, flags);
1749        next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1750                                        queue);
1751        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1752
1753        return next;
1754}
1755EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
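/*
 * Usage sketch: a driver may peek at the queue to decide whether keeping
 * the hardware powered between messages is worthwhile. foo_keep_warm() is
 * illustrative only.
 *
 *	if (spi_get_next_queued_message(ctlr))
 *		foo_keep_warm(ctlr);
 */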
1756
1757/**
1758 * spi_finalize_current_message() - the current message is complete
1759 * @ctlr: the controller to return the message to
1760 *
1761 * Called by the driver to notify the core that the message in the front of the
1762 * queue is complete and can be removed from the queue.
1763 */
1764void spi_finalize_current_message(struct spi_controller *ctlr)
1765{
1766        struct spi_transfer *xfer;
1767        struct spi_message *mesg;
1768        unsigned long flags;
1769        int ret;
1770
1771        spin_lock_irqsave(&ctlr->queue_lock, flags);
1772        mesg = ctlr->cur_msg;
1773        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1774
1775        if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1776                list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1777                        ptp_read_system_postts(xfer->ptp_sts);
1778                        xfer->ptp_sts_word_post = xfer->len;
1779                }
1780        }
1781
1782        if (unlikely(ctlr->ptp_sts_supported))
1783                list_for_each_entry(xfer, &mesg->transfers, transfer_list)
1784                        WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1785
1786        spi_unmap_msg(ctlr, mesg);
1787
1788        /* In the prepare_message callback the SPI bus has the opportunity
1789         * to split a transfer into smaller chunks.
1790         * Release the split transfers here, since spi_map_msg() was done on
1791         * the split transfers.
1792         */
1793        spi_res_release(ctlr, mesg);
1794
1795        if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1796                ret = ctlr->unprepare_message(ctlr, mesg);
1797                if (ret) {
1798                        dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1799                                ret);
1800                }
1801        }
1802
1803        spin_lock_irqsave(&ctlr->queue_lock, flags);
1804        ctlr->cur_msg = NULL;
1805        ctlr->cur_msg_prepared = false;
1806        ctlr->fallback = false;
1807        kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1808        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1809
1810        trace_spi_message_done(mesg);
1811
1812        mesg->state = NULL;
1813        if (mesg->complete)
1814                mesg->complete(mesg->context);
1815}
1816EXPORT_SYMBOL_GPL(spi_finalize_current_message);
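/*
 * Usage sketch: a driver that provides its own ->transfer_one_message()
 * must call spi_finalize_current_message() exactly once per message,
 * successful or not, so that the queue can advance. foo_do_one_transfer()
 * is illustrative only.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *		int ret = 0;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *			ret = foo_do_one_transfer(ctlr, msg->spi, xfer);
 *			if (ret)
 *				break;
 *			msg->actual_length += xfer->len;
 *		}
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */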
1817
1818static int spi_start_queue(struct spi_controller *ctlr)
1819{
1820        unsigned long flags;
1821
1822        spin_lock_irqsave(&ctlr->queue_lock, flags);
1823
1824        if (ctlr->running || ctlr->busy) {
1825                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1826                return -EBUSY;
1827        }
1828
1829        ctlr->running = true;
1830        ctlr->cur_msg = NULL;
1831        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1832
1833        kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1834
1835        return 0;
1836}
1837
1838static int spi_stop_queue(struct spi_controller *ctlr)
1839{
1840        unsigned long flags;
1841        unsigned limit = 500;
1842        int ret = 0;
1843
1844        spin_lock_irqsave(&ctlr->queue_lock, flags);
1845
1846        /*
1847         * This is a bit lame, but is optimized for the common execution path.
1848         * A wait_queue on the ctlr->busy could be used, but then the common
1849         * execution path (pump_messages) would be required to call wake_up or
1850         * friends on every SPI message. Do this instead.
1851         */
1852        while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1853                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1854                usleep_range(10000, 11000);
1855                spin_lock_irqsave(&ctlr->queue_lock, flags);
1856        }
1857
1858        if (!list_empty(&ctlr->queue) || ctlr->busy)
1859                ret = -EBUSY;
1860        else
1861                ctlr->running = false;
1862
1863        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1864
1865        if (ret)
1866                dev_warn(&ctlr->dev, "could not stop message queue\n");
1867
1868        return ret;
1870}
1871
1872static int spi_destroy_queue(struct spi_controller *ctlr)
1873{
1874        int ret;
1875
1876        ret = spi_stop_queue(ctlr);
1877
1878        /*
1879         * kthread_flush_worker will block until all work is done.
1880         * If the reason that stop_queue timed out is that the work will never
1881         * finish, then it does no good to call flush/stop thread, so
1882         * return anyway.
1883         */
1884        if (ret) {
1885                dev_err(&ctlr->dev, "problem destroying queue\n");
1886                return ret;
1887        }
1888
1889        kthread_destroy_worker(ctlr->kworker);
1890
1891        return 0;
1892}
1893
1894static int __spi_queued_transfer(struct spi_device *spi,
1895                                 struct spi_message *msg,
1896                                 bool need_pump)
1897{
1898        struct spi_controller *ctlr = spi->controller;
1899        unsigned long flags;
1900
1901        spin_lock_irqsave(&ctlr->queue_lock, flags);
1902
1903        if (!ctlr->running) {
1904                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1905                return -ESHUTDOWN;
1906        }
1907        msg->actual_length = 0;
1908        msg->status = -EINPROGRESS;
1909
1910        list_add_tail(&msg->queue, &ctlr->queue);
1911        if (!ctlr->busy && need_pump)
1912                kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1913
1914        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1915        return 0;
1916}
1917
1918/**
1919 * spi_queued_transfer - transfer function for queued transfers
1920 * @spi: spi device which is requesting transfer
1921 * @msg: spi message which is to be handled; it is queued to the driver queue
1922 *
1923 * Return: zero on success, else a negative error code.
1924 */
1925static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1926{
1927        return __spi_queued_transfer(spi, msg, true);
1928}
1929
1930static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1931{
1932        int ret;
1933
1934        ctlr->transfer = spi_queued_transfer;
1935        if (!ctlr->transfer_one_message)
1936                ctlr->transfer_one_message = spi_transfer_one_message;
1937
1938        /* Initialize and start queue */
1939        ret = spi_init_queue(ctlr);
1940        if (ret) {
1941                dev_err(&ctlr->dev, "problem initializing queue\n");
1942                goto err_init_queue;
1943        }
1944        ctlr->queued = true;
1945        ret = spi_start_queue(ctlr);
1946        if (ret) {
1947                dev_err(&ctlr->dev, "problem starting queue\n");
1948                goto err_start_queue;
1949        }
1950
1951        return 0;
1952
1953err_start_queue:
1954        spi_destroy_queue(ctlr);
1955err_init_queue:
1956        return ret;
1957}
1958
1959/**
1960 * spi_flush_queue - Send all pending messages in the queue from the caller's
1961 *                   context
1962 * @ctlr: controller to process queue for
1963 *
1964 * This should be used when one wants to ensure all pending messages have been
1965 * sent before doing something. It is used by the spi-mem code to make sure SPI
1966 * memory operations do not preempt regular SPI transfers that have been queued
1967 * before the spi-mem operation.
1968 */
1969void spi_flush_queue(struct spi_controller *ctlr)
1970{
1971        if (ctlr->transfer == spi_queued_transfer)
1972                __spi_pump_messages(ctlr, false);
1973}
1974
1975/*-------------------------------------------------------------------------*/
1976
1977#if defined(CONFIG_OF)
1978static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1979                           struct device_node *nc)
1980{
1981        u32 value;
1982        int rc;
1983
1984        /* Mode (clock phase/polarity/etc.) */
1985        if (of_property_read_bool(nc, "spi-cpha"))
1986                spi->mode |= SPI_CPHA;
1987        if (of_property_read_bool(nc, "spi-cpol"))
1988                spi->mode |= SPI_CPOL;
1989        if (of_property_read_bool(nc, "spi-3wire"))
1990                spi->mode |= SPI_3WIRE;
1991        if (of_property_read_bool(nc, "spi-lsb-first"))
1992                spi->mode |= SPI_LSB_FIRST;
1993        if (of_property_read_bool(nc, "spi-cs-high"))
1994                spi->mode |= SPI_CS_HIGH;
1995
1996        /* Device DUAL/QUAD mode */
1997        if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1998                switch (value) {
1999                case 0:
2000                        spi->mode |= SPI_NO_TX;
2001                        break;
2002                case 1:
2003                        break;
2004                case 2:
2005                        spi->mode |= SPI_TX_DUAL;
2006                        break;
2007                case 4:
2008                        spi->mode |= SPI_TX_QUAD;
2009                        break;
2010                case 8:
2011                        spi->mode |= SPI_TX_OCTAL;
2012                        break;
2013                default:
2014                        dev_warn(&ctlr->dev,
2015                                "spi-tx-bus-width %d not supported\n",
2016                                value);
2017                        break;
2018                }
2019        }
2020
2021        if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2022                switch (value) {
2023                case 0:
2024                        spi->mode |= SPI_NO_RX;
2025                        break;
2026                case 1:
2027                        break;
2028                case 2:
2029                        spi->mode |= SPI_RX_DUAL;
2030                        break;
2031                case 4:
2032                        spi->mode |= SPI_RX_QUAD;
2033                        break;
2034                case 8:
2035                        spi->mode |= SPI_RX_OCTAL;
2036                        break;
2037                default:
2038                        dev_warn(&ctlr->dev,
2039                                "spi-rx-bus-width %d not supported\n",
2040                                value);
2041                        break;
2042                }
2043        }
2044
2045        if (spi_controller_is_slave(ctlr)) {
2046                if (!of_node_name_eq(nc, "slave")) {
2047                        dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2048                                nc);
2049                        return -EINVAL;
2050                }
2051                return 0;
2052        }
2053
2054        /* Device address */
2055        rc = of_property_read_u32(nc, "reg", &value);
2056        if (rc) {
2057                dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2058                        nc, rc);
2059                return rc;
2060        }
2061        spi->chip_select = value;
2062
2063        /* Device speed */
2064        if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2065                spi->max_speed_hz = value;
2066
2067        return 0;
2068}
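/*
 * An illustrative device tree fragment exercising the properties parsed
 * above: mode flags, RX bus width, the chip select in "reg" and the
 * maximum clock rate. The node and the "jedec,spi-nor" compatible are
 * examples only, not requirements of this code.
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;			// chip select 0
 *			spi-max-frequency = <40000000>;
 *			spi-rx-bus-width = <4>;		// SPI_RX_QUAD
 *			spi-cpol;
 *			spi-cpha;
 *		};
 *	};
 */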
2069
2070static struct spi_device *
2071of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2072{
2073        struct spi_device *spi;
2074        int rc;
2075
2076        /* Alloc an spi_device */
2077        spi = spi_alloc_device(ctlr);
2078        if (!spi) {
2079                dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2080                rc = -ENOMEM;
2081                goto err_out;
2082        }
2083
2084        /* Select device driver */
2085        rc = of_modalias_node(nc, spi->modalias,
2086                                sizeof(spi->modalias));
2087        if (rc < 0) {
2088                dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2089                goto err_out;
2090        }
2091
2092        rc = of_spi_parse_dt(ctlr, spi, nc);
2093        if (rc)
2094                goto err_out;
2095
2096        /* Store a pointer to the node in the device structure */
2097        of_node_get(nc);
2098        spi->dev.of_node = nc;
2099        spi->dev.fwnode = of_fwnode_handle(nc);
2100
2101        /* Register the new device */
2102        rc = spi_add_device(spi);
2103        if (rc) {
2104                dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2105                goto err_of_node_put;
2106        }
2107
2108        return spi;
2109
2110err_of_node_put:
2111        of_node_put(nc);
2112err_out:
2113        spi_dev_put(spi);
2114        return ERR_PTR(rc);
2115}
2116
2117/**
2118 * of_register_spi_devices() - Register child devices onto the SPI bus
2119 * @ctlr:       Pointer to spi_controller device
2120 *
2121 * Registers an spi_device for each child node of the controller node that
2122 * represents a valid SPI slave.
2123 */
2124static void of_register_spi_devices(struct spi_controller *ctlr)
2125{
2126        struct spi_device *spi;
2127        struct device_node *nc;
2128
2129        if (!ctlr->dev.of_node)
2130                return;
2131
2132        for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2133                if (of_node_test_and_set_flag(nc, OF_POPULATED))
2134                        continue;
2135                spi = of_register_spi_device(ctlr, nc);
2136                if (IS_ERR(spi)) {
2137                        dev_warn(&ctlr->dev,
2138                                 "Failed to create SPI device for %pOF\n", nc);
2139                        of_node_clear_flag(nc, OF_POPULATED);
2140                }
2141        }
2142}
2143#else
2144static void of_register_spi_devices(struct spi_controller *ctlr) { }
2145#endif
2146
2147/**
2148 * spi_new_ancillary_device() - Register ancillary SPI device
2149 * @spi:         Pointer to the main SPI device registering the ancillary device
2150 * @chip_select: Chip Select of the ancillary device
2151 *
2152 * Register an ancillary SPI device; for example some chips have a chip-select
2153 * for normal device usage and another one for setup/firmware upload.
2154 *
2155 * This may only be called from the main SPI device's probe routine.
2156 *
2157 * Return: pointer to the new spi_device on success, or ERR_PTR() on failure
2158 */
2159struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2160                                             u8 chip_select)
2161{
2162        struct spi_device *ancillary;
2163        int rc = 0;
2164
2165        /* Alloc an spi_device */
2166        ancillary = spi_alloc_device(spi->controller);
2167        if (!ancillary) {
2168                rc = -ENOMEM;
2169                goto err_out;
2170        }
2171
2172        strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2173
2174        /* Use provided chip-select for ancillary device */
2175        ancillary->chip_select = chip_select;
2176
2177        /* Take over SPI mode/speed from SPI main device */
2178        ancillary->max_speed_hz = spi->max_speed_hz;
2179        ancillary->mode = spi->mode;
2180
2181        /* Register the new device */
2182        rc = spi_add_device_locked(ancillary);
2183        if (rc) {
2184                dev_err(&spi->dev, "failed to register ancillary device\n");
2185                goto err_out;
2186        }
2187
2188        return ancillary;
2189
2190err_out:
2191        spi_dev_put(ancillary);
2192        return ERR_PTR(rc);
2193}
2194EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
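/*
 * Usage sketch: a main device's probe() claiming a second chip select for
 * firmware upload, as described above. foo_upload_firmware() and the use
 * of chip select 1 are illustrative only.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *
 *		return foo_upload_firmware(fw_spi);
 *	}
 */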
2195
2196#ifdef CONFIG_ACPI
2197struct acpi_spi_lookup {
2198        struct spi_controller   *ctlr;
2199        u32                     max_speed_hz;
2200        u32                     mode;
2201        int                     irq;
2202        u8                      bits_per_word;
2203        u8                      chip_select;
2204};
2205
2206static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2207                                            struct acpi_spi_lookup *lookup)
2208{
2209        const union acpi_object *obj;
2210
2211        if (!x86_apple_machine)
2212                return;
2213
2214        if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2215            && obj->buffer.length >= 4)
2216                lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2217
2218        if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2219            && obj->buffer.length == 8)
2220                lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2221
2222        if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2223            && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2224                lookup->mode |= SPI_LSB_FIRST;
2225
2226        if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2227            && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2228                lookup->mode |= SPI_CPOL;
2229
2230        if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2231            && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2232                lookup->mode |= SPI_CPHA;
2233}
2234
2235static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2236{
2237        struct acpi_spi_lookup *lookup = data;
2238        struct spi_controller *ctlr = lookup->ctlr;
2239
2240        if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2241                struct acpi_resource_spi_serialbus *sb;
2242                acpi_handle parent_handle;
2243                acpi_status status;
2244
2245                sb = &ares->data.spi_serial_bus;
2246                if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2247
2248                        status = acpi_get_handle(NULL,
2249                                                 sb->resource_source.string_ptr,
2250                                                 &parent_handle);
2251
2252                        if (ACPI_FAILURE(status) ||
2253                            ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2254                                return -ENODEV;
2255
2256                        /*
2257                         * ACPI DeviceSelection numbering is handled by the
2258                         * host controller driver in Windows and can vary
2259                         * from driver to driver. In Linux we always expect
2260                         * 0 .. max - 1 so we need to ask the driver to
2261                         * translate between the two schemes.
2262                         */
2263                        if (ctlr->fw_translate_cs) {
2264                                int cs = ctlr->fw_translate_cs(ctlr,
2265                                                sb->device_selection);
2266                                if (cs < 0)
2267                                        return cs;
2268                                lookup->chip_select = cs;
2269                        } else {
2270                                lookup->chip_select = sb->device_selection;
2271                        }
2272
2273                        lookup->max_speed_hz = sb->connection_speed;
2274                        lookup->bits_per_word = sb->data_bit_length;
2275
2276                        if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2277                                lookup->mode |= SPI_CPHA;
2278                        if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2279                                lookup->mode |= SPI_CPOL;
2280                        if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2281                                lookup->mode |= SPI_CS_HIGH;
2282                }
2283        } else if (lookup->irq < 0) {
2284                struct resource r;
2285
2286                if (acpi_dev_resource_interrupt(ares, 0, &r))
2287                        lookup->irq = r.start;
2288        }
2289
2290        /* Always tell the ACPI core to skip this resource */
2291        return 1;
2292}
2293
2294static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2295                                            struct acpi_device *adev)
2296{
2297        acpi_handle parent_handle = NULL;
2298        struct list_head resource_list;
2299        struct acpi_spi_lookup lookup = {};
2300        struct spi_device *spi;
2301        int ret;
2302
2303        if (acpi_bus_get_status(adev) || !adev->status.present ||
2304            acpi_device_enumerated(adev))
2305                return AE_OK;
2306
2307        lookup.ctlr             = ctlr;
2308        lookup.irq              = -1;
2309
2310        INIT_LIST_HEAD(&resource_list);
2311        ret = acpi_dev_get_resources(adev, &resource_list,
2312                                     acpi_spi_add_resource, &lookup);
2313        acpi_dev_free_resource_list(&resource_list);
2314
2315        if (ret < 0)
2316                /* found SPI in _CRS but it points to another controller */
2317                return AE_OK;
2318
2319        if (!lookup.max_speed_hz &&
2320            ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2321            ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
2322                /* Apple does not use _CRS but nested devices for SPI slaves */
2323                acpi_spi_parse_apple_properties(adev, &lookup);
2324        }
2325
2326        if (!lookup.max_speed_hz)
2327                return AE_OK;
2328
2329        spi = spi_alloc_device(ctlr);
2330        if (!spi) {
2331                dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
2332                        dev_name(&adev->dev));
2333                return AE_NO_MEMORY;
2334        }
2335
2337        ACPI_COMPANION_SET(&spi->dev, adev);
2338        spi->max_speed_hz       = lookup.max_speed_hz;
2339        spi->mode               |= lookup.mode;
2340        spi->irq                = lookup.irq;
2341        spi->bits_per_word      = lookup.bits_per_word;
2342        spi->chip_select        = lookup.chip_select;
2343
2344        acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2345                          sizeof(spi->modalias));
2346
2347        if (spi->irq < 0)
2348                spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2349
2350        acpi_device_set_enumerated(adev);
2351
2352        adev->power.flags.ignore_parent = true;
2353        if (spi_add_device(spi)) {
2354                adev->power.flags.ignore_parent = false;
2355                dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2356                        dev_name(&adev->dev));
2357                spi_dev_put(spi);
2358        }
2359
2360        return AE_OK;
2361}
2362
2363static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2364                                       void *data, void **return_value)
2365{
2366        struct spi_controller *ctlr = data;
2367        struct acpi_device *adev;
2368
2369        if (acpi_bus_get_device(handle, &adev))
2370                return AE_OK;
2371
2372        return acpi_register_spi_device(ctlr, adev);
2373}
2374
2375#define SPI_ACPI_ENUMERATE_MAX_DEPTH            32
2376
2377static void acpi_register_spi_devices(struct spi_controller *ctlr)
2378{
2379        acpi_status status;
2380        acpi_handle handle;
2381
2382        handle = ACPI_HANDLE(ctlr->dev.parent);
2383        if (!handle)
2384                return;
2385
2386        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2387                                     SPI_ACPI_ENUMERATE_MAX_DEPTH,
2388                                     acpi_spi_add_device, NULL, ctlr, NULL);
2389        if (ACPI_FAILURE(status))
2390                dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2391}
2392#else
2393static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2394#endif /* CONFIG_ACPI */
2395
2396static void spi_controller_release(struct device *dev)
2397{
2398        struct spi_controller *ctlr;
2399
2400        ctlr = container_of(dev, struct spi_controller, dev);
2401        kfree(ctlr);
2402}
2403
2404static struct class spi_master_class = {
2405        .name           = "spi_master",
2406        .owner          = THIS_MODULE,
2407        .dev_release    = spi_controller_release,
2408        .dev_groups     = spi_master_groups,
2409};
2410
2411#ifdef CONFIG_SPI_SLAVE
2412/**
2413 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2414 *                   controller
2415 * @spi: device used for the current transfer
2416 */
2417int spi_slave_abort(struct spi_device *spi)
2418{
2419        struct spi_controller *ctlr = spi->controller;
2420
2421        if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2422                return ctlr->slave_abort(ctlr);
2423
2424        return -ENOTSUPP;
2425}
2426EXPORT_SYMBOL_GPL(spi_slave_abort);
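/*
 * Usage sketch: a slave protocol driver might abort a transfer it has
 * posted once a protocol timeout expires, so that a fresh one can be
 * queued. struct foo_slave and its timer are illustrative only.
 *
 *	static void foo_slave_timeout(struct timer_list *t)
 *	{
 *		struct foo_slave *priv = from_timer(priv, t, timer);
 *
 *		spi_slave_abort(priv->spi);
 *	}
 */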
2427
2428static int match_true(struct device *dev, void *data)
2429{
2430        return 1;
2431}
2432
2433static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2434                          char *buf)
2435{
2436        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2437                                                   dev);
2438        struct device *child = device_find_child(&ctlr->dev, NULL, match_true);
2439        int ret = sprintf(buf, "%s\n",
2440                          child ? to_spi_device(child)->modalias : NULL);
2441        put_device(child);      /* drop the device_find_child() reference */
2442        return ret;
2443}
2444
2445static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2446                           const char *buf, size_t count)
2447{
2448        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2449                                                   dev);
2450        struct spi_device *spi;
2451        struct device *child;
2452        char name[32];
2453        int rc;
2454
2455        rc = sscanf(buf, "%31s", name);
2456        if (rc != 1 || !name[0])
2457                return -EINVAL;
2458
2459        child = device_find_child(&ctlr->dev, NULL, match_true);
2460        if (child) {
2461                /* Remove registered slave */
2462                device_unregister(child);
2463                put_device(child);
2464        }
2465
2466        if (strcmp(name, "(null)")) {
2467                /* Register new slave */
2468                spi = spi_alloc_device(ctlr);
2469                if (!spi)
2470                        return -ENOMEM;
2471
2472                strlcpy(spi->modalias, name, sizeof(spi->modalias));
2473
2474                rc = spi_add_device(spi);
2475                if (rc) {
2476                        spi_dev_put(spi);
2477                        return rc;
2478                }
2479        }
2480
2481        return count;
2482}
2483
2484static DEVICE_ATTR_RW(slave);
2485
2486static struct attribute *spi_slave_attrs[] = {
2487        &dev_attr_slave.attr,
2488        NULL,
2489};
2490
2491static const struct attribute_group spi_slave_group = {
2492        .attrs = spi_slave_attrs,
2493};
2494
2495static const struct attribute_group *spi_slave_groups[] = {
2496        &spi_controller_statistics_group,
2497        &spi_slave_group,
2498        NULL,
2499};
2500
2501static struct class spi_slave_class = {
2502        .name           = "spi_slave",
2503        .owner          = THIS_MODULE,
2504        .dev_release    = spi_controller_release,
2505        .dev_groups     = spi_slave_groups,
2506};
2507#else
2508extern struct class spi_slave_class;    /* dummy */
2509#endif
2510
2511/**
2512 * __spi_alloc_controller - allocate an SPI master or slave controller
2513 * @dev: the controller, possibly using the platform_bus
2514 * @size: how much zeroed driver-private data to allocate; the pointer to this
2515 *      memory is in the driver_data field of the returned device, accessible
2516 *      with spi_controller_get_devdata(); the memory is cacheline aligned;
2517 *      drivers granting DMA access to portions of their private data need to
2518 *      round up @size using ALIGN(size, dma_get_cache_alignment()).
2519 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2520 *      slave (true) controller
2521 * Context: can sleep
2522 *
2523 * This call is used only by SPI controller drivers, which are the
2524 * only ones directly touching chip registers.  It's how they allocate
2525 * an spi_controller structure, prior to calling spi_register_controller().
2526 *
2527 * This must be called from context that can sleep.
2528 *
2529 * The caller is responsible for assigning the bus number and initializing the
2530 * controller's methods before calling spi_register_controller(); and (after
2531 * errors adding the device) calling spi_controller_put() to prevent a memory
2532 * leak.
2533 *
2534 * Return: the SPI controller structure on success, else NULL.
2535 */
2536struct spi_controller *__spi_alloc_controller(struct device *dev,
2537                                              unsigned int size, bool slave)
2538{
2539        struct spi_controller   *ctlr;
2540        size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2541
2542        if (!dev)
2543                return NULL;
2544
2545        ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2546        if (!ctlr)
2547                return NULL;
2548
2549        device_initialize(&ctlr->dev);
2550        INIT_LIST_HEAD(&ctlr->queue);
2551        spin_lock_init(&ctlr->queue_lock);
2552        spin_lock_init(&ctlr->bus_lock_spinlock);
2553        mutex_init(&ctlr->bus_lock_mutex);
2554        mutex_init(&ctlr->io_mutex);
2555        mutex_init(&ctlr->add_lock);
2556        ctlr->bus_num = -1;
2557        ctlr->num_chipselect = 1;
2558        ctlr->slave = slave;
2559        if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2560                ctlr->dev.class = &spi_slave_class;
2561        else
2562                ctlr->dev.class = &spi_master_class;
2563        ctlr->dev.parent = dev;
2564        pm_suspend_ignore_children(&ctlr->dev, true);
2565        spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2566
2567        return ctlr;
2568}
2569EXPORT_SYMBOL_GPL(__spi_alloc_controller);
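/*
 * Usage sketch: drivers normally go through the spi_alloc_master() or
 * spi_alloc_slave() wrappers and keep their state in the cacheline-aligned
 * private area described above. struct foo_spi is illustrative only.
 *
 *	struct foo_spi {
 *		void __iomem *base;
 *	};
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	fs = spi_controller_get_devdata(ctlr);
 */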
2570
2571static void devm_spi_release_controller(struct device *dev, void *ctlr)
2572{
2573        spi_controller_put(*(struct spi_controller **)ctlr);
2574}
2575
2576/**
2577 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2578 * @dev: physical device of SPI controller
2579 * @size: how much zeroed driver-private data to allocate
2580 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2581 * Context: can sleep
2582 *
2583 * Allocate an SPI controller and automatically release a reference on it
2584 * when @dev is unbound from its driver.  Drivers are thus relieved from
2585 * having to call spi_controller_put().
2586 *
2587 * The arguments to this function are identical to __spi_alloc_controller().
2588 *
2589 * Return: the SPI controller structure on success, else NULL.
2590 */
2591struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2592                                                   unsigned int size,
2593                                                   bool slave)
2594{
2595        struct spi_controller **ptr, *ctlr;
2596
2597        ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2598                           GFP_KERNEL);
2599        if (!ptr)
2600                return NULL;
2601
2602        ctlr = __spi_alloc_controller(dev, size, slave);
2603        if (ctlr) {
2604                ctlr->devm_allocated = true;
2605                *ptr = ctlr;
2606                devres_add(dev, ptr);
2607        } else {
2608                devres_free(ptr);
2609        }
2610
2611        return ctlr;
2612}
2613EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2614
2615#ifdef CONFIG_OF
2616static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2617{
2618        int nb, i, *cs;
2619        struct device_node *np = ctlr->dev.of_node;
2620
2621        if (!np)
2622                return 0;
2623
2624        nb = of_gpio_named_count(np, "cs-gpios");
2625        ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2626
2627        /* Return error only for an incorrectly formed cs-gpios property */
2628        if (nb == 0 || nb == -ENOENT)
2629                return 0;
2630        else if (nb < 0)
2631                return nb;
2632
2633        cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2634                          GFP_KERNEL);
2635        ctlr->cs_gpios = cs;
2636
2637        if (!ctlr->cs_gpios)
2638                return -ENOMEM;
2639
2640        for (i = 0; i < ctlr->num_chipselect; i++)
2641                cs[i] = -ENOENT;
2642
2643        for (i = 0; i < nb; i++)
2644                cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2645
2646        return 0;
2647}
2648#else
2649static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2650{
2651        return 0;
2652}
2653#endif
2654
2655/**
2656 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2657 * @ctlr: The SPI master to grab GPIO descriptors for
2658 */
2659static int spi_get_gpio_descs(struct spi_controller *ctlr)
2660{
2661        int nb, i;
2662        struct gpio_desc **cs;
2663        struct device *dev = &ctlr->dev;
2664        unsigned long native_cs_mask = 0;
2665        unsigned int num_cs_gpios = 0;
2666
2667        nb = gpiod_count(dev, "cs");
2668        if (nb < 0) {
2669                /* No GPIOs at all is fine, else return the error */
2670                if (nb == -ENOENT)
2671                        return 0;
2672                return nb;
2673        }
2674
2675        ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2676
2677        cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2678                          GFP_KERNEL);
2679        if (!cs)
2680                return -ENOMEM;
2681        ctlr->cs_gpiods = cs;
2682
2683        for (i = 0; i < nb; i++) {
2684                /*
2685                 * Most chipselects are active low, the inverted
2686                 * semantics are handled by special quirks in gpiolib,
2687                 * so initializing them to GPIOD_OUT_LOW here means
2688                 * "unasserted", in most cases this will drive the physical
2689                 * line high.
2690                 */
2691                cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2692                                                      GPIOD_OUT_LOW);
2693                if (IS_ERR(cs[i]))
2694                        return PTR_ERR(cs[i]);
2695
2696                if (cs[i]) {
2697                        /*
2698                         * If we find a CS GPIO, name it after the device and
2699                         * chip select line.
2700                         */
2701                        char *gpioname;
2702
2703                        gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2704                                                  dev_name(dev), i);
2705                        if (!gpioname)
2706                                return -ENOMEM;
2707                        gpiod_set_consumer_name(cs[i], gpioname);
2708                        num_cs_gpios++;
2709                        continue;
2710                }
2711
2712                if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2713                        dev_err(dev, "Invalid native chip select %d\n", i);
2714                        return -EINVAL;
2715                }
2716                native_cs_mask |= BIT(i);
2717        }
2718
2719        ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
2720
2721        if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
2722            ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
2723                dev_err(dev, "No unused native chip select available\n");
2724                return -EINVAL;
2725        }
2726
2727        return 0;
2728}
2729
2730static int spi_controller_check_ops(struct spi_controller *ctlr)
2731{
2732        /*
2733         * The controller may implement only the high-level SPI-memory-like
2734         * operations if it does not support regular SPI transfers, and this is
2735         * a valid use case.
2736         * If ->mem_ops is NULL, we request that at least one of the
2737         * ->transfer_xxx() methods be implemented.
2738         */
2739        if (ctlr->mem_ops) {
2740                if (!ctlr->mem_ops->exec_op)
2741                        return -EINVAL;
2742        } else if (!ctlr->transfer && !ctlr->transfer_one &&
2743                   !ctlr->transfer_one_message) {
2744                return -EINVAL;
2745        }
2746
2747        return 0;
2748}
2749
2750/**
2751 * spi_register_controller - register SPI master or slave controller
2752 * @ctlr: initialized master, originally from spi_alloc_master() or
2753 *      spi_alloc_slave()
2754 * Context: can sleep
2755 *
2756 * SPI controllers connect to their drivers using some non-SPI bus,
2757 * such as the platform bus.  The final stage of probe() in that code
2758 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2759 *
2760 * SPI controllers use board specific (often SOC specific) bus numbers,
2761 * and board-specific addressing for SPI devices combines those numbers
2762 * with chip select numbers.  Since SPI does not directly support dynamic
2763 * device identification, boards need configuration tables telling which
2764 * chip is at which address.
2765 *
2766 * This must be called from context that can sleep.  It returns zero on
2767 * success, else a negative error code (dropping the controller's refcount).
2768 * After a successful return, the caller is responsible for calling
2769 * spi_unregister_controller().
2770 *
2771 * Return: zero on success, else a negative error code.
2772 */
2773int spi_register_controller(struct spi_controller *ctlr)
2774{
2775        struct device           *dev = ctlr->dev.parent;
2776        struct boardinfo        *bi;
2777        int                     status;
2778        int                     id, first_dynamic;
2779
2780        if (!dev)
2781                return -ENODEV;
2782
2783        /*
2784         * Make sure all necessary hooks are implemented before registering
2785         * the SPI controller.
2786         */
2787        status = spi_controller_check_ops(ctlr);
2788        if (status)
2789                return status;
2790
2791        if (ctlr->bus_num >= 0) {
2792                /* devices with a fixed bus num must check-in with the num */
2793                mutex_lock(&board_lock);
2794                id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2795                        ctlr->bus_num + 1, GFP_KERNEL);
2796                mutex_unlock(&board_lock);
2797                if (WARN(id < 0, "couldn't get idr"))
2798                        return id == -ENOSPC ? -EBUSY : id;
2799                ctlr->bus_num = id;
2800        } else if (ctlr->dev.of_node) {
2801                /* allocate dynamic bus number using Linux idr */
2802                id = of_alias_get_id(ctlr->dev.of_node, "spi");
2803                if (id >= 0) {
2804                        ctlr->bus_num = id;
2805                        mutex_lock(&board_lock);
2806                        id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2807                                       ctlr->bus_num + 1, GFP_KERNEL);
2808                        mutex_unlock(&board_lock);
2809                        if (WARN(id < 0, "couldn't get idr"))
2810                                return id == -ENOSPC ? -EBUSY : id;
2811                }
2812        }
2813        if (ctlr->bus_num < 0) {
2814                first_dynamic = of_alias_get_highest_id("spi");
2815                if (first_dynamic < 0)
2816                        first_dynamic = 0;
2817                else
2818                        first_dynamic++;
2819
2820                mutex_lock(&board_lock);
2821                id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2822                               0, GFP_KERNEL);
2823                mutex_unlock(&board_lock);
2824                if (WARN(id < 0, "couldn't get idr"))
2825                        return id;
2826                ctlr->bus_num = id;
2827        }
2828        ctlr->bus_lock_flag = 0;
2829        init_completion(&ctlr->xfer_completion);
2830        if (!ctlr->max_dma_len)
2831                ctlr->max_dma_len = INT_MAX;
2832
2833        /* Register the device, then userspace will see it.
2834         * Registration fails if the bus ID is in use.
2835         */
2836        dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2837
2838        if (!spi_controller_is_slave(ctlr)) {
2839                if (ctlr->use_gpio_descriptors) {
2840                        status = spi_get_gpio_descs(ctlr);
2841                        if (status)
2842                                goto free_bus_id;
2843                        /*
2844                         * A controller using GPIO descriptors always
2845                         * supports SPI_CS_HIGH if need be.
2846                         */
2847                        ctlr->mode_bits |= SPI_CS_HIGH;
2848                } else {
2849                        /* Legacy code path for GPIOs from DT */
2850                        status = of_spi_get_gpio_numbers(ctlr);
2851                        if (status)
2852                                goto free_bus_id;
2853                }
2854        }
2855
2856        /*
2857         * Even if it's just one always-selected device, there must
2858         * be at least one chipselect.
2859         */
2860        if (!ctlr->num_chipselect) {
2861                status = -EINVAL;
2862                goto free_bus_id;
2863        }
2864
2865        status = device_add(&ctlr->dev);
2866        if (status < 0)
2867                goto free_bus_id;
2868        dev_dbg(dev, "registered %s %s\n",
2869                        spi_controller_is_slave(ctlr) ? "slave" : "master",
2870                        dev_name(&ctlr->dev));
2871
2872        /*
2873         * If we're using a queued driver, start the queue. Note that we don't
2874         * need the queueing logic if the driver is only supporting high-level
2875         * memory operations.
2876         */
2877        if (ctlr->transfer) {
2878                dev_info(dev, "controller is unqueued, this is deprecated\n");
2879        } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2880                status = spi_controller_initialize_queue(ctlr);
2881                if (status) {
2882                        device_del(&ctlr->dev);
2883                        goto free_bus_id;
2884                }
2885        }
2886        /* add statistics */
2887        spin_lock_init(&ctlr->statistics.lock);
2888
2889        mutex_lock(&board_lock);
2890        list_add_tail(&ctlr->list, &spi_controller_list);
2891        list_for_each_entry(bi, &board_list, list)
2892                spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2893        mutex_unlock(&board_lock);
2894
2895        /* Register devices from the device tree and ACPI */
2896        of_register_spi_devices(ctlr);
2897        acpi_register_spi_devices(ctlr);
2898        return status;
2899
2900free_bus_id:
2901        mutex_lock(&board_lock);
2902        idr_remove(&spi_master_idr, ctlr->bus_num);
2903        mutex_unlock(&board_lock);
2904        return status;
2905}
2906EXPORT_SYMBOL_GPL(spi_register_controller);
2907
2908static void devm_spi_unregister(void *ctlr)
2909{
2910        spi_unregister_controller(ctlr);
2911}
2912
2913/**
2914 * devm_spi_register_controller - register managed SPI master or slave
2915 *      controller
2916 * @dev:    device managing SPI controller
2917 * @ctlr: initialized controller, originally from spi_alloc_master() or
2918 *      spi_alloc_slave()
2919 * Context: can sleep
2920 *
2921 * Register an SPI controller as with spi_register_controller(), which will
2922 * automatically be unregistered and freed.
2923 *
2924 * Return: zero on success, else a negative error code.
2925 */
2926int devm_spi_register_controller(struct device *dev,
2927                                 struct spi_controller *ctlr)
2928{
2929        int ret;
2930
2931        ret = spi_register_controller(ctlr);
2932        if (ret)
2933                return ret;
2934
2935        return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr);
2936}
2937EXPORT_SYMBOL_GPL(devm_spi_register_controller);
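
/*
 * Example: a minimal sketch of a probe() routine built on the managed
 * helpers above.  The foo_* name is hypothetical, a real driver would
 * also set up ->transfer_one() and friends, and this assumes
 * <linux/platform_device.h> in addition to <linux/spi/spi.h>.
 */
static int foo_spi_probe(struct platform_device *pdev)
{
        struct spi_controller *ctlr;

        /* devm_spi_alloc_master() marks ctlr->devm_allocated for us */
        ctlr = devm_spi_alloc_master(&pdev->dev, 0);
        if (!ctlr)
                return -ENOMEM;

        ctlr->bus_num = -1;             /* let the core pick a dynamic bus number */
        ctlr->num_chipselect = 1;       /* must be non-zero, see above */
        ctlr->mode_bits = SPI_CPOL | SPI_CPHA;

        /* unregistered (and freed) automatically when &pdev->dev unbinds */
        return devm_spi_register_controller(&pdev->dev, ctlr);
}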
2938
2939static int __unregister(struct device *dev, void *null)
2940{
2941        spi_unregister_device(to_spi_device(dev));
2942        return 0;
2943}
2944
2945/**
2946 * spi_unregister_controller - unregister SPI master or slave controller
2947 * @ctlr: the controller being unregistered
2948 * Context: can sleep
2949 *
2950 * This call is used only by SPI controller drivers, which are the
2951 * only ones directly touching chip registers.
2952 *
2953 * This must be called from context that can sleep.
2954 *
2955 * Note that this function also drops a reference to the controller.
2956 */
2957void spi_unregister_controller(struct spi_controller *ctlr)
2958{
2959        struct spi_controller *found;
2960        int id = ctlr->bus_num;
2961
2962        /* Prevent addition of new devices, unregister existing ones */
2963        if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2964                mutex_lock(&ctlr->add_lock);
2965
2966        device_for_each_child(&ctlr->dev, NULL, __unregister);
2967
2968        /* First make sure that this controller was ever added */
2969        mutex_lock(&board_lock);
2970        found = idr_find(&spi_master_idr, id);
2971        mutex_unlock(&board_lock);
2972        if (ctlr->queued) {
2973                if (spi_destroy_queue(ctlr))
2974                        dev_err(&ctlr->dev, "queue remove failed\n");
2975        }
2976        mutex_lock(&board_lock);
2977        list_del(&ctlr->list);
2978        mutex_unlock(&board_lock);
2979
2980        device_del(&ctlr->dev);
2981
2982        /* Release the last reference on the controller if its driver
2983         * has not yet been converted to devm_spi_alloc_master/slave().
2984         */
2985        if (!ctlr->devm_allocated)
2986                put_device(&ctlr->dev);
2987
2988        /* free bus id */
2989        mutex_lock(&board_lock);
2990        if (found == ctlr)
2991                idr_remove(&spi_master_idr, id);
2992        mutex_unlock(&board_lock);
2993
2994        if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2995                mutex_unlock(&ctlr->add_lock);
2996}
2997EXPORT_SYMBOL_GPL(spi_unregister_controller);
2998
2999int spi_controller_suspend(struct spi_controller *ctlr)
3000{
3001        int ret;
3002
3003        /* Basically no-ops for non-queued controllers */
3004        if (!ctlr->queued)
3005                return 0;
3006
3007        ret = spi_stop_queue(ctlr);
3008        if (ret)
3009                dev_err(&ctlr->dev, "queue stop failed\n");
3010
3011        return ret;
3012}
3013EXPORT_SYMBOL_GPL(spi_controller_suspend);
3014
3015int spi_controller_resume(struct spi_controller *ctlr)
3016{
3017        int ret;
3018
3019        if (!ctlr->queued)
3020                return 0;
3021
3022        ret = spi_start_queue(ctlr);
3023        if (ret)
3024                dev_err(&ctlr->dev, "queue restart failed\n");
3025
3026        return ret;
3027}
3028EXPORT_SYMBOL_GPL(spi_controller_resume);
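
/*
 * Example: a sketch of how a controller driver might wire the two
 * helpers above into its dev_pm_ops, assuming it stored the
 * spi_controller as driver data.  The bar_* names are hypothetical and
 * clock/regulator handling is deliberately elided.
 */
static int bar_spi_suspend(struct device *dev)
{
        struct spi_controller *ctlr = dev_get_drvdata(dev);

        /* park the message queue before the hardware loses state */
        return spi_controller_suspend(ctlr);
}

static int bar_spi_resume(struct device *dev)
{
        struct spi_controller *ctlr = dev_get_drvdata(dev);

        /* restart the queue once the hardware is powered again */
        return spi_controller_resume(ctlr);
}

static SIMPLE_DEV_PM_OPS(bar_spi_pm_ops, bar_spi_suspend, bar_spi_resume);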
3029
3030static int __spi_controller_match(struct device *dev, const void *data)
3031{
3032        struct spi_controller *ctlr;
3033        const u16 *bus_num = data;
3034
3035        ctlr = container_of(dev, struct spi_controller, dev);
3036        return ctlr->bus_num == *bus_num;
3037}
3038
3039/**
3040 * spi_busnum_to_master - look up master associated with bus_num
3041 * @bus_num: the master's bus number
3042 * Context: can sleep
3043 *
3044 * This call may be used with devices that are registered after
3045 * arch init time.  It returns a refcounted pointer to the relevant
3046 * spi_controller (which the caller must release), or NULL if there is
3047 * no such master registered.
3048 *
3049 * Return: the SPI master structure on success, else NULL.
3050 */
3051struct spi_controller *spi_busnum_to_master(u16 bus_num)
3052{
3053        struct device           *dev;
3054        struct spi_controller   *ctlr = NULL;
3055
3056        dev = class_find_device(&spi_master_class, NULL, &bus_num,
3057                                __spi_controller_match);
3058        if (dev)
3059                ctlr = container_of(dev, struct spi_controller, dev);
3060        /* reference was taken in class_find_device */
3061        return ctlr;
3062}
3063EXPORT_SYMBOL_GPL(spi_busnum_to_master);
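
/*
 * Example: a sketch of a lookup by bus number.  The bus number passed
 * in is arbitrary; the essential point is that the reference taken by
 * class_find_device() must be dropped with put_device() when done.
 */
static int example_report_bus(u16 busnum)
{
        struct spi_controller *ctlr = spi_busnum_to_master(busnum);

        if (!ctlr)
                return -ENODEV;

        pr_info("spi bus %u is %s\n", busnum, dev_name(&ctlr->dev));
        put_device(&ctlr->dev);         /* balance the lookup's reference */
        return 0;
}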
3064
3065/*-------------------------------------------------------------------------*/
3066
3067/* Core methods for SPI resource management */
3068
3069/**
3070 * spi_res_alloc - allocate a spi resource that is life-cycle managed
3071 *                 during the processing of a spi_message while using
3072 *                 spi_transfer_one
3073 * @spi:     the spi device for which we allocate memory
3074 * @release: the release code to execute for this resource
3075 * @size:    size to alloc and return
3076 * @gfp:     GFP allocation flags
3077 *
3078 * Return: the pointer to the allocated data, or NULL on allocation failure
3079 *
3080 * This may get enhanced in the future to allocate from a memory pool
3081 * of the @spi_device or @spi_controller to avoid repeated allocations.
3082 */
3083void *spi_res_alloc(struct spi_device *spi,
3084                    spi_res_release_t release,
3085                    size_t size, gfp_t gfp)
3086{
3087        struct spi_res *sres;
3088
3089        sres = kzalloc(sizeof(*sres) + size, gfp);
3090        if (!sres)
3091                return NULL;
3092
3093        INIT_LIST_HEAD(&sres->entry);
3094        sres->release = release;
3095
3096        return sres->data;
3097}
3098EXPORT_SYMBOL_GPL(spi_res_alloc);
3099
3100/**
3101 * spi_res_free - free an spi resource
3102 * @res: pointer to the custom data of a resource;
3103 *       freeing a NULL pointer is a no-op
3104 */
3105void spi_res_free(void *res)
3106{
3107        struct spi_res *sres = container_of(res, struct spi_res, data);
3108
3109        if (!res)
3110                return;
3111
3112        WARN_ON(!list_empty(&sres->entry));
3113        kfree(sres);
3114}
3115EXPORT_SYMBOL_GPL(spi_res_free);
3116
3117/**
3118 * spi_res_add - add a spi_res to the spi_message
3119 * @message: the spi message
3120 * @res:     the spi_resource
3121 */
3122void spi_res_add(struct spi_message *message, void *res)
3123{
3124        struct spi_res *sres = container_of(res, struct spi_res, data);
3125
3126        WARN_ON(!list_empty(&sres->entry));
3127        list_add_tail(&sres->entry, &message->resources);
3128}
3129EXPORT_SYMBOL_GPL(spi_res_add);
3130
3131/**
3132 * spi_res_release - release all spi resources for this message
3133 * @ctlr:  the @spi_controller
3134 * @message: the @spi_message
3135 */
3136void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
3137{
3138        struct spi_res *res, *tmp;
3139
3140        list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
3141                if (res->release)
3142                        res->release(ctlr, message, res->data);
3143
3144                list_del(&res->entry);
3145
3146                kfree(res);
3147        }
3148}
3149EXPORT_SYMBOL_GPL(spi_res_release);
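
/*
 * Example: a sketch of attaching a life-cycle managed scratch buffer
 * to a message with the helpers above.  The example_* names are
 * hypothetical and @msg->spi is assumed to be set already; the core
 * invokes the release hook and frees the memory in spi_res_release().
 */
static void example_res_release(struct spi_controller *ctlr,
                                struct spi_message *msg, void *res)
{
        /* runs just before the core kfree()s the resource */
        dev_dbg(&msg->spi->dev, "scratch buffer released\n");
}

static void *example_attach_scratch(struct spi_message *msg, size_t len)
{
        void *scratch;

        scratch = spi_res_alloc(msg->spi, example_res_release,
                                len, GFP_KERNEL);
        if (!scratch)
                return NULL;

        /* freed automatically once @msg has been processed */
        spi_res_add(msg, scratch);
        return scratch;
}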
3150
3151/*-------------------------------------------------------------------------*/
3152
3153/* Core methods for spi_message alterations */
3154
3155static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3156                                            struct spi_message *msg,
3157                                            void *res)
3158{
3159        struct spi_replaced_transfers *rxfer = res;
3160        size_t i;
3161
3162        /* call extra callback if requested */
3163        if (rxfer->release)
3164                rxfer->release(ctlr, msg, res);
3165
3166        /* insert replaced transfers back into the message */
3167        list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3168
3169        /* remove the formerly inserted entries */
3170        for (i = 0; i < rxfer->inserted; i++)
3171                list_del(&rxfer->inserted_transfers[i].transfer_list);
3172}
3173
3174/**
3175 * spi_replace_transfers - replace transfers with several transfers
3176 *                         and register change with spi_message.resources
3177 * @msg:           the spi_message we work upon
3178 * @xfer_first:    the first spi_transfer we want to replace
3179 * @remove:        number of transfers to remove
3180 * @insert:        the number of transfers we want to insert instead
3181 * @release:       extra release code necessary in some circumstances
3182 * @extradatasize: extra data to allocate (with alignment guarantees
3183 *                 of struct @spi_transfer)
3184 * @gfp:           gfp flags
3185 *
3186 * Return: pointer to @spi_replaced_transfers,
3187 *         ERR_PTR(...) in case of errors.
3188 */
3189struct spi_replaced_transfers *spi_replace_transfers(
3190        struct spi_message *msg,
3191        struct spi_transfer *xfer_first,
3192        size_t remove,
3193        size_t insert,
3194        spi_replaced_release_t release,
3195        size_t extradatasize,
3196        gfp_t gfp)
3197{
3198        struct spi_replaced_transfers *rxfer;
3199        struct spi_transfer *xfer;
3200        size_t i;
3201
3202        /* allocate the structure using spi_res */
3203        rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3204                              struct_size(rxfer, inserted_transfers, insert)
3205                              + extradatasize,
3206                              gfp);
3207        if (!rxfer)
3208                return ERR_PTR(-ENOMEM);
3209
3210        /* the release code to invoke before running the generic release */
3211        rxfer->release = release;
3212
3213        /* assign extradata */
3214        if (extradatasize)
3215                rxfer->extradata =
3216                        &rxfer->inserted_transfers[insert];
3217
3218        /* init the replaced_transfers list */
3219        INIT_LIST_HEAD(&rxfer->replaced_transfers);
3220
3221        /* assign the list_entry after which we should reinsert
3222         * the @replaced_transfers - it may be spi_message.transfers!
3223         */
3224        rxfer->replaced_after = xfer_first->transfer_list.prev;
3225
3226        /* remove the requested number of transfers */
3227        for (i = 0; i < remove; i++) {
3228                /* if the entry after replaced_after is &msg->transfers
3229                 * then we have been requested to remove more transfers
3230                 * than are in the list
3231                 */
3232                if (rxfer->replaced_after->next == &msg->transfers) {
3233                        dev_err(&msg->spi->dev,
3234                                "requested to remove more spi_transfers than are available\n");
3235                        /* insert replaced transfers back into the message */
3236                        list_splice(&rxfer->replaced_transfers,
3237                                    rxfer->replaced_after);
3238
3239                        /* free the spi_replace_transfer structure */
3240                        spi_res_free(rxfer);
3241
3242                        /* and return with an error */
3243                        return ERR_PTR(-EINVAL);
3244                }
3245
3246                /* remove the entry after replaced_after from list of
3247                 * transfers and add it to list of replaced_transfers
3248                 */
3249                list_move_tail(rxfer->replaced_after->next,
3250                               &rxfer->replaced_transfers);
3251        }
3252
3253        /* create copies of the given xfer with identical settings,
3254         * all based on the first transfer that gets removed
3255         */
3256        for (i = 0; i < insert; i++) {
3257                /* we need to run in reverse order */
3258                xfer = &rxfer->inserted_transfers[insert - 1 - i];
3259
3260                /* copy all spi_transfer data */
3261                memcpy(xfer, xfer_first, sizeof(*xfer));
3262
3263                /* add to list */
3264                list_add(&xfer->transfer_list, rxfer->replaced_after);
3265
3266                /* clear cs_change and delay for all but the last */
3267                if (i) {
3268                        xfer->cs_change = false;
3269                        xfer->delay.value = 0;
3270                }
3271        }
3272
3273        /* set up inserted */
3274        rxfer->inserted = insert;
3275
3276        /* and register it with spi_res/spi_message */
3277        spi_res_add(msg, rxfer);
3278
3279        return rxfer;
3280}
3281EXPORT_SYMBOL_GPL(spi_replace_transfers);
3282
3283static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3284                                        struct spi_message *msg,
3285                                        struct spi_transfer **xferp,
3286                                        size_t maxsize,
3287                                        gfp_t gfp)
3288{
3289        struct spi_transfer *xfer = *xferp, *xfers;
3290        struct spi_replaced_transfers *srt;
3291        size_t offset;
3292        size_t count, i;
3293
3294        /* calculate how many we have to replace */
3295        count = DIV_ROUND_UP(xfer->len, maxsize);
3296
3297        /* create replacement */
3298        srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3299        if (IS_ERR(srt))
3300                return PTR_ERR(srt);
3301        xfers = srt->inserted_transfers;
3302
3303        /* now handle each of those newly inserted spi_transfers
3304         * note that the replacement spi_transfers are all preset
3305         * to the same values as *xferp, so tx_buf, rx_buf and len
3306         * are all identical (as well as most others),
3307         * so we just have to fix up len and the pointers.
3308         *
3309         * this also includes support for the deprecated
3310         * spi_message.is_dma_mapped interface
3311         */
3312
3313        /* the first transfer just needs the length modified, so we
3314         * run it outside the loop
3315         */
3316        xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3317
3318        /* all the others need rx_buf/tx_buf also set */
3319        for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3320                /* update rx_buf, tx_buf and dma */
3321                if (xfers[i].rx_buf)
3322                        xfers[i].rx_buf += offset;
3323                if (xfers[i].rx_dma)
3324                        xfers[i].rx_dma += offset;
3325                if (xfers[i].tx_buf)
3326                        xfers[i].tx_buf += offset;
3327                if (xfers[i].tx_dma)
3328                        xfers[i].tx_dma += offset;
3329
3330                /* update length */
3331                xfers[i].len = min(maxsize, xfers[i].len - offset);
3332        }
3333
3334        /* we set up xferp to the last entry we have inserted,
3335         * so that we skip those already split transfers
3336         */
3337        *xferp = &xfers[count - 1];
3338
3339        /* increment statistics counters */
3340        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3341                                       transfers_split_maxsize);
3342        SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3343                                       transfers_split_maxsize);
3344
3345        return 0;
3346}
3347
3348/**
3349 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3350 *                               when an individual transfer exceeds a
3351 *                               certain size
3352 * @ctlr:    the @spi_controller for this transfer
3353 * @msg:     the @spi_message to transform
3354 * @maxsize: the maximum transfer size; any longer transfer is split
3355 * @gfp:     GFP allocation flags
3356 *
3357 * Return: status of transformation
3358 */
3359int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3360                                struct spi_message *msg,
3361                                size_t maxsize,
3362                                gfp_t gfp)
3363{
3364        struct spi_transfer *xfer;
3365        int ret;
3366
3367        /* iterate over the transfer_list,
3368         * but note that xfer is advanced to the last transfer inserted
3369         * to avoid checking sizes again unnecessarily (also, xfer may
3370         * potentially belong to a different list by the time the
3371         * replacement has happened)
3372         */
3373        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3374                if (xfer->len > maxsize) {
3375                        ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3376                                                           maxsize, gfp);
3377                        if (ret)
3378                                return ret;
3379                }
3380        }
3381
3382        return 0;
3383}
3384EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
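
/*
 * Example: a sketch of a controller with a small hardware FIFO using
 * the helper above from its prepare_message() hook.  The qux_* name
 * and the 256-byte limit are hypothetical.
 */
static int qux_spi_prepare_message(struct spi_controller *ctlr,
                                   struct spi_message *msg)
{
        /* no single transfer may exceed the FIFO, so split long ones */
        return spi_split_transfers_maxsize(ctlr, msg, 256, GFP_KERNEL);
}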
3385
3386/*-------------------------------------------------------------------------*/
3387
3388/* Core methods for SPI controller protocol drivers.  Some of the
3389 * other core methods are currently defined as inline functions.
3390 */
3391
3392static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3393                                        u8 bits_per_word)
3394{
3395        if (ctlr->bits_per_word_mask) {
3396                /* Only 32 bits fit in the mask */
3397                if (bits_per_word > 32)
3398                        return -EINVAL;
3399                if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3400                        return -EINVAL;
3401        }
3402
3403        return 0;
3404}
3405
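/*
 * Example: a sketch of how a controller driver declares the word sizes
 * that the check above validates against.  Both mask macros come from
 * <linux/spi/spi.h>; a driver that leaves bits_per_word_mask at zero
 * opts out of this core-level check.
 */
static void example_declare_word_sizes(struct spi_controller *ctlr)
{
        /* either: accept exactly 8- and 16-bit words ... */
        ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);

        /* ... or: accept a contiguous range, here 4..16 bits */
        ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
}
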
3406/**
3407 * spi_setup - setup SPI mode and clock rate
3408 * @spi: the device whose settings are being modified
3409 * Context: can sleep, and no requests are queued to the device
3410 *
3411 * SPI protocol drivers may need to update the transfer mode if the
3412 * device doesn't work with its default.  They may likewise need
3413 * to update clock rates or word sizes from initial values.  This function
3414 * changes those settings, and must be called from a context that can sleep.
3415 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3416 * effect the next time the device is selected and data is transferred to
3417 * or from it.  When this function returns, the spi device is deselected.
3418 *
3419 * Note that this call will fail if the protocol driver specifies an option
3420 * that the underlying controller or its driver does not support.  For
3421 * example, not all hardware supports wire transfers using nine bit words,
3422 * LSB-first wire encoding, or active-high chipselects.
3423 *
3424 * Return: zero on success, else a negative error code.
3425 */
3426int spi_setup(struct spi_device *spi)
3427{
3428        unsigned        bad_bits, ugly_bits;
3429        int             status;
3430
3431        /*
3432         * check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3433         * from being set at the same time
3434         */
3435        if ((hweight_long(spi->mode &
3436                (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3437            (hweight_long(spi->mode &
3438                (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3439                dev_err(&spi->dev,
3440                "setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3441                return -EINVAL;
3442        }
3443        /* if it is SPI_3WIRE mode, DUAL and QUAD are forbidden */
3445        if ((spi->mode & SPI_3WIRE) && (spi->mode &
3446                (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3447                 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3448                return -EINVAL;
3449        /* help drivers fail *cleanly* when they need options
3450         * that aren't supported with their current controller.
3451         * SPI_CS_WORD has a fallback software implementation,
3452         * so it is ignored here.
3453         */
3454        bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3455                                 SPI_NO_TX | SPI_NO_RX);
3456        /* nothing prevents us from working with active-high CS
3457         * when it is driven by a GPIO.
3458         */
3459        if (gpio_is_valid(spi->cs_gpio))
3460                bad_bits &= ~SPI_CS_HIGH;
3461        ugly_bits = bad_bits &
3462                    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3463                     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3464        if (ugly_bits) {
3465                dev_warn(&spi->dev,
3466                         "setup: ignoring unsupported mode bits %x\n",
3467                         ugly_bits);
3468                spi->mode &= ~ugly_bits;
3469                bad_bits &= ~ugly_bits;
3470        }
3471        if (bad_bits) {
3472                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3473                        bad_bits);
3474                return -EINVAL;
3475        }
3476
3477        if (!spi->bits_per_word)
3478                spi->bits_per_word = 8;
3479
3480        status = __spi_validate_bits_per_word(spi->controller,
3481                                              spi->bits_per_word);
3482        if (status)
3483                return status;
3484
3485        if (spi->controller->max_speed_hz &&
3486            (!spi->max_speed_hz ||
3487             spi->max_speed_hz > spi->controller->max_speed_hz))
3488                spi->max_speed_hz = spi->controller->max_speed_hz;
3489
3490        mutex_lock(&spi->controller->io_mutex);
3491
3492        if (spi->controller->setup) {
3493                status = spi->controller->setup(spi);
3494                if (status) {
3495                        mutex_unlock(&spi->controller->io_mutex);
3496                        dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3497                                status);
3498                        return status;
3499                }
3500        }
3501
3502        if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3503                status = pm_runtime_get_sync(spi->controller->dev.parent);
3504                if (status < 0) {
3505                        mutex_unlock(&spi->controller->io_mutex);
3506                        pm_runtime_put_noidle(spi->controller->dev.parent);
3507                        dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3508                                status);
3509                        return status;
3510                }
3511
3512                /*
3513                 * We do not want to return a positive value from
3514                 * pm_runtime_get, as there are many instances of devices
3515                 * calling spi_setup() and checking for a non-zero return
3516                 * value instead of a negative return value.
3517                 */
3518                status = 0;
3519
3520                spi_set_cs(spi, false, true);
3521                pm_runtime_mark_last_busy(spi->controller->dev.parent);
3522                pm_runtime_put_autosuspend(spi->controller->dev.parent);
3523        } else {
3524                spi_set_cs(spi, false, true);
3525        }
3526
3527        mutex_unlock(&spi->controller->io_mutex);
3528
3529        if (spi->rt && !spi->controller->rt) {
3530                spi->controller->rt = true;
3531                spi_set_thread_rt(spi->controller);
3532        }
3533
3534        trace_spi_setup(spi, status);
3535
3536        dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3537                        spi->mode & SPI_MODE_X_MASK,
3538                        (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3539                        (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3540                        (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3541                        (spi->mode & SPI_LOOP) ? "loopback, " : "",
3542                        spi->bits_per_word, spi->max_speed_hz,
3543                        status);
3544
3545        return status;
3546}
3547EXPORT_SYMBOL_GPL(spi_setup);
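
/*
 * Example: a sketch of a protocol driver adjusting its device before
 * the first transfer, per the kernel-doc above.  The mode, word size
 * and speed are hypothetical values for an imaginary chip.
 */
static int example_configure_chip(struct spi_device *spi)
{
        spi->mode |= SPI_MODE_3;        /* CPOL=1, CPHA=1 */
        spi->bits_per_word = 16;
        spi->max_speed_hz = 1000000;    /* never clock this chip above 1 MHz */

        /* fails cleanly if the controller cannot honour these options */
        return spi_setup(spi);
}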
3548
3549static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3550                                       struct spi_device *spi)
3551{
3552        int delay1, delay2;
3553
3554        delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3555        if (delay1 < 0)
3556                return delay1;
3557
3558        delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3559        if (delay2 < 0)
3560                return delay2;
3561
3562        if (delay1 < delay2)
3563                memcpy(&xfer->word_delay, &spi->word_delay,
3564                       sizeof(xfer->word_delay));
3565
3566        return 0;
3567}
3568
3569static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3570{
3571        struct spi_controller *ctlr = spi->controller;
3572        struct spi_transfer *xfer;
3573        int w_size;
3574
3575        if (list_empty(&message->transfers))
3576                return -EINVAL;
3577
3578        /* If an SPI controller does not support toggling the CS line on each
3579         * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3580         * for the CS line, we can emulate the CS-per-word hardware function by
3581         * splitting transfers into one-word transfers and ensuring that
3582         * cs_change is set for each transfer.
3583         */
3584        if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3585                                          spi->cs_gpiod ||
3586                                          gpio_is_valid(spi->cs_gpio))) {
3587                size_t maxsize;
3588                int ret;
3589
3590                maxsize = (spi->bits_per_word + 7) / 8;
3591
3592                /* spi_split_transfers_maxsize() requires message->spi */
3593                message->spi = spi;
3594
3595                ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3596                                                  GFP_KERNEL);
3597                if (ret)
3598                        return ret;
3599
3600                list_for_each_entry(xfer, &message->transfers, transfer_list) {
3601                        /* don't change cs_change on the last entry in the list */
3602                        if (list_is_last(&xfer->transfer_list, &message->transfers))
3603                                break;
3604                        xfer->cs_change = 1;
3605                }
3606        }
3607
3608        /* Half-duplex links include the original MicroWire, and ones with
3609         * only one data pin like SPI_3WIRE (switches direction) or where
3610         * either MOSI or MISO is missing.  They can also be caused by
3611         * software limitations.
3612         */
3613        if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3614            (spi->mode & SPI_3WIRE)) {
3615                unsigned flags = ctlr->flags;
3616
3617                list_for_each_entry(xfer, &message->transfers, transfer_list) {
3618                        if (xfer->rx_buf && xfer->tx_buf)
3619                                return -EINVAL;
3620                        if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3621                                return -EINVAL;
3622                        if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3623                                return -EINVAL;
3624                }
3625        }
3626
3627        /*
3628         * Set transfer bits_per_word and max speed as spi device default if
3629         * it is not set for this transfer.
3630         * Set transfer tx_nbits and rx_nbits as single transfer default
3631         * (SPI_NBITS_SINGLE) if it is not set for this transfer.
3632         * Ensure transfer word_delay is at least as long as that required by
3633         * device itself.
3634         */
3635        message->frame_length = 0;
3636        list_for_each_entry(xfer, &message->transfers, transfer_list) {
3637                xfer->effective_speed_hz = 0;
3638                message->frame_length += xfer->len;
3639                if (!xfer->bits_per_word)
3640                        xfer->bits_per_word = spi->bits_per_word;
3641
3642                if (!xfer->speed_hz)
3643                        xfer->speed_hz = spi->max_speed_hz;
3644
3645                if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3646                        xfer->speed_hz = ctlr->max_speed_hz;
3647
3648                if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3649                        return -EINVAL;
3650
3651                /*
3652                 * SPI transfer length should be multiple of SPI word size
3653                 * where SPI word size should be power-of-two multiple
3654                 */
3655                if (xfer->bits_per_word <= 8)
3656                        w_size = 1;
3657                else if (xfer->bits_per_word <= 16)
3658                        w_size = 2;
3659                else
3660                        w_size = 4;
3661
3662                /* No partial transfers accepted */
3663                if (xfer->len % w_size)
3664                        return -EINVAL;
3665
3666                if (xfer->speed_hz && ctlr->min_speed_hz &&
3667                    xfer->speed_hz < ctlr->min_speed_hz)
3668                        return -EINVAL;
3669
3670                if (xfer->tx_buf && !xfer->tx_nbits)
3671                        xfer->tx_nbits = SPI_NBITS_SINGLE;
3672                if (xfer->rx_buf && !xfer->rx_nbits)
3673                        xfer->rx_nbits = SPI_NBITS_SINGLE;
3674                /* check transfer tx/rx_nbits:
3675                 * 1. check the value matches one of single, dual or quad
3676                 * 2. check that tx/rx_nbits match the mode in spi_device
3677                 */
3678                if (xfer->tx_buf) {
3679                        if (spi->mode & SPI_NO_TX)
3680                                return -EINVAL;
3681                        if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3682                                xfer->tx_nbits != SPI_NBITS_DUAL &&
3683                                xfer->tx_nbits != SPI_NBITS_QUAD)
3684                                return -EINVAL;
3685                        if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3686                                !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3687                                return -EINVAL;
3688                        if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3689                                !(spi->mode & SPI_TX_QUAD))
3690                                return -EINVAL;
3691                }
3692                /* check transfer rx_nbits */
3693                if (xfer->rx_buf) {
3694                        if (spi->mode & SPI_NO_RX)
3695                                return -EINVAL;
3696                        if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3697                                xfer->rx_nbits != SPI_NBITS_DUAL &&
3698                                xfer->rx_nbits != SPI_NBITS_QUAD)
3699                                return -EINVAL;
3700                        if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3701                                !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3702                                return -EINVAL;
3703                        if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3704                                !(spi->mode & SPI_RX_QUAD))
3705                                return -EINVAL;
3706                }
3707
3708                if (_spi_xfer_word_delay_update(xfer, spi))
3709                        return -EINVAL;
3710        }
3711
3712        message->status = -EINPROGRESS;
3713
3714        return 0;
3715}
3716
3717static int __spi_async(struct spi_device *spi, struct spi_message *message)
3718{
3719        struct spi_controller *ctlr = spi->controller;
3720        struct spi_transfer *xfer;
3721
3722        /*
3723         * Some controllers do not support doing regular SPI transfers. Return
3724         * -ENOTSUPP when this is the case.
3725         */
3726        if (!ctlr->transfer)
3727                return -ENOTSUPP;
3728
3729        message->spi = spi;
3730
3731        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3732        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3733
3734        trace_spi_message_submit(message);
3735
3736        if (!ctlr->ptp_sts_supported) {
3737                list_for_each_entry(xfer, &message->transfers, transfer_list) {
3738                        xfer->ptp_sts_word_pre = 0;
3739                        ptp_read_system_prets(xfer->ptp_sts);
3740                }
3741        }
3742
3743        return ctlr->transfer(spi, message);
3744}
3745
3746/**
3747 * spi_async - asynchronous SPI transfer
3748 * @spi: device with which data will be exchanged
3749 * @message: describes the data transfers, including completion callback
3750 * Context: any (irqs may be blocked, etc)
3751 *
3752 * This call may be used in_irq and other contexts which can't sleep,
3753 * as well as from task contexts which can sleep.
3754 *
3755 * The completion callback is invoked in a context which can't sleep.
3756 * Before that invocation, the value of message->status is undefined.
3757 * When the callback is issued, message->status holds either zero (to
3758 * indicate complete success) or a negative error code.  After that
3759 * callback returns, the driver which issued the transfer request may
3760 * deallocate the associated memory; it's no longer in use by any SPI
3761 * core or controller driver code.
3762 *
3763 * Note that although all messages to a spi_device are handled in
3764 * FIFO order, messages may go to different devices in other orders.
3765 * Some device might be higher priority, or have various "hard" access
3766 * time requirements, for example.
3767 *
3768 * On detection of any fault during the transfer, processing of
3769 * the entire message is aborted, and the device is deselected.
3770 * Until returning from the associated message completion callback,
3771 * no other spi_message queued to that device will be processed.
3772 * (This rule applies equally to all the synchronous transfer calls,
3773 * which are wrappers around this core asynchronous primitive.)
3774 *
3775 * Return: zero on success, else a negative error code.
3776 */
3777int spi_async(struct spi_device *spi, struct spi_message *message)
3778{
3779        struct spi_controller *ctlr = spi->controller;
3780        int ret;
3781        unsigned long flags;
3782
3783        ret = __spi_validate(spi, message);
3784        if (ret != 0)
3785                return ret;
3786
3787        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3788
3789        if (ctlr->bus_lock_flag)
3790                ret = -EBUSY;
3791        else
3792                ret = __spi_async(spi, message);
3793
3794        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3795
3796        return ret;
3797}
3798EXPORT_SYMBOL_GPL(spi_async);
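
/*
 * Example: a sketch of an asynchronous read using spi_async().  The
 * example_* names are hypothetical; note that the completion callback
 * runs in a context that cannot sleep, and @ctx must stay allocated
 * until that callback has run.
 */
struct example_async_ctx {
        struct spi_message msg;
        struct spi_transfer xfer;
        u8 buf[4];
};

static void example_async_complete(void *arg)
{
        struct example_async_ctx *ctx = arg;

        /* msg.status is zero on success or a negative error code */
        pr_debug("async read finished: %d\n", ctx->msg.status);
}

static int example_start_async_read(struct spi_device *spi,
                                    struct example_async_ctx *ctx)
{
        memset(&ctx->xfer, 0, sizeof(ctx->xfer));
        ctx->xfer.rx_buf = ctx->buf;
        ctx->xfer.len = sizeof(ctx->buf);

        spi_message_init_with_transfers(&ctx->msg, &ctx->xfer, 1);
        ctx->msg.complete = example_async_complete;
        ctx->msg.context = ctx;

        return spi_async(spi, &ctx->msg);
}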
3799
3800/**
3801 * spi_async_locked - version of spi_async with exclusive bus usage
3802 * @spi: device with which data will be exchanged
3803 * @message: describes the data transfers, including completion callback
3804 * Context: any (irqs may be blocked, etc)
3805 *
3806 * This call may be used in_irq and other contexts which can't sleep,
3807 * as well as from task contexts which can sleep.
3808 *
3809 * The completion callback is invoked in a context which can't sleep.
3810 * Before that invocation, the value of message->status is undefined.
3811 * When the callback is issued, message->status holds either zero (to
3812 * indicate complete success) or a negative error code.  After that
3813 * callback returns, the driver which issued the transfer request may
3814 * deallocate the associated memory; it's no longer in use by any SPI
3815 * core or controller driver code.
3816 *
3817 * Note that although all messages to a spi_device are handled in
3818 * FIFO order, messages may go to different devices in other orders.
3819 * Some device might be higher priority, or have various "hard" access
3820 * time requirements, for example.
3821 *
3822 * On detection of any fault during the transfer, processing of
3823 * the entire message is aborted, and the device is deselected.
3824 * Until returning from the associated message completion callback,
3825 * no other spi_message queued to that device will be processed.
3826 * (This rule applies equally to all the synchronous transfer calls,
3827 * which are wrappers around this core asynchronous primitive.)
3828 *
3829 * Return: zero on success, else a negative error code.
3830 */
3831int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3832{
3833        struct spi_controller *ctlr = spi->controller;
3834        int ret;
3835        unsigned long flags;
3836
3837        ret = __spi_validate(spi, message);
3838        if (ret != 0)
3839                return ret;
3840
3841        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3842
3843        ret = __spi_async(spi, message);
3844
3845        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3846
3847        return ret;
3848
3849}
3850EXPORT_SYMBOL_GPL(spi_async_locked);
3851
3852/*-------------------------------------------------------------------------*/
3853
3854/* Utility methods for SPI protocol drivers, layered on
3855 * top of the core.  Some other utility methods are defined as
3856 * inline functions.
3857 */
3858
3859static void spi_complete(void *arg)
3860{
3861        complete(arg);
3862}
3863
3864static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3865{
3866        DECLARE_COMPLETION_ONSTACK(done);
3867        int status;
3868        struct spi_controller *ctlr = spi->controller;
3869        unsigned long flags;
3870
3871        status = __spi_validate(spi, message);
3872        if (status != 0)
3873                return status;
3874
3875        message->complete = spi_complete;
3876        message->context = &done;
3877        message->spi = spi;
3878
3879        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3880        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3881
3882        /* If we're not using the legacy transfer method then we will
3883         * try to transfer in the calling context, so we special-case
3884         * this path.  This code would be less tricky if we could remove
3885         * the support for driver-implemented message queues.
3886         */
3887        if (ctlr->transfer == spi_queued_transfer) {
3888                spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3889
3890                trace_spi_message_submit(message);
3891
3892                status = __spi_queued_transfer(spi, message, false);
3893
3894                spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3895        } else {
3896                status = spi_async_locked(spi, message);
3897        }
3898
3899        if (status == 0) {
3900                /* Push out the messages in the calling context if we
3901                 * can.
3902                 */
3903                if (ctlr->transfer == spi_queued_transfer) {
3904                        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3905                                                       spi_sync_immediate);
3906                        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3907                                                       spi_sync_immediate);
3908                        __spi_pump_messages(ctlr, false);
3909                }
3910
3911                wait_for_completion(&done);
3912                status = message->status;
3913        }
3914        message->context = NULL;
3915        return status;
3916}
3917
3918/**
3919 * spi_sync - blocking/synchronous SPI data transfers
3920 * @spi: device with which data will be exchanged
3921 * @message: describes the data transfers
3922 * Context: can sleep
3923 *
3924 * This call may only be used from a context that may sleep.  The sleep
3925 * is non-interruptible, and has no timeout.  Low-overhead controller
3926 * drivers may DMA directly into and out of the message buffers.
3927 *
3928 * Note that the SPI device's chip select is active during the message,
3929 * and then is normally disabled between messages.  Drivers for some
3930 * frequently-used devices may want to minimize costs of selecting a chip,
3931 * by leaving it selected in anticipation that the next message will go
3932 * to the same chip.  (That may increase power usage.)
3933 *
3934 * Also, the caller is guaranteeing that the memory associated with the
3935 * message will not be freed before this call returns.
3936 *
3937 * Return: zero on success, else a negative error code.
3938 */
3939int spi_sync(struct spi_device *spi, struct spi_message *message)
3940{
3941        int ret;
3942
3943        mutex_lock(&spi->controller->bus_lock_mutex);
3944        ret = __spi_sync(spi, message);
3945        mutex_unlock(&spi->controller->bus_lock_mutex);
3946
3947        return ret;
3948}
3949EXPORT_SYMBOL_GPL(spi_sync);
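
/*
 * Example: a sketch of a blocking command/response exchange built on
 * spi_sync().  On-stack buffers are fine for PIO controllers but are
 * not DMA-safe in general; spi_write_then_read() below is the copying
 * convenience wrapper for that case.
 */
static int example_cmd_response(struct spi_device *spi, u8 cmd,
                                u8 *resp, size_t resp_len)
{
        struct spi_transfer xfers[2] = {
                { .tx_buf = &cmd,  .len = 1 },
                { .rx_buf = resp,  .len = resp_len },
        };
        struct spi_message msg;

        /* chip select stays asserted across both transfers */
        spi_message_init_with_transfers(&msg, xfers, 2);
        return spi_sync(spi, &msg);
}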
3950
3951/**
3952 * spi_sync_locked - version of spi_sync with exclusive bus usage
3953 * @spi: device with which data will be exchanged
3954 * @message: describes the data transfers
3955 * Context: can sleep
3956 *
3957 * This call may only be used from a context that may sleep.  The sleep
3958 * is non-interruptible, and has no timeout.  Low-overhead controller
3959 * drivers may DMA directly into and out of the message buffers.
3960 *
3961 * This call should be used by drivers that require exclusive access to the
3962 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3963 * be released by a spi_bus_unlock call when the exclusive access is over.
3964 *
3965 * Return: zero on success, else a negative error code.
3966 */
3967int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3968{
3969        return __spi_sync(spi, message);
3970}
3971EXPORT_SYMBOL_GPL(spi_sync_locked);
3972
3973/**
3974 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3975 * @ctlr: SPI bus master that should be locked for exclusive bus access
3976 * Context: can sleep
3977 *
3978 * This call may only be used from a context that may sleep.  The sleep
3979 * is non-interruptible, and has no timeout.
3980 *
3981 * This call should be used by drivers that require exclusive access to the
3982 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3983 * exclusive access is over. Data transfer must be done by spi_sync_locked
3984 * and spi_async_locked calls when the SPI bus lock is held.
3985 *
3986 * Return: always zero.
3987 */
3988int spi_bus_lock(struct spi_controller *ctlr)
3989{
3990        unsigned long flags;
3991
3992        mutex_lock(&ctlr->bus_lock_mutex);
3993
3994        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3995        ctlr->bus_lock_flag = 1;
3996        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3997
3998        /* mutex remains locked until spi_bus_unlock is called */
3999
4000        return 0;
4001}
4002EXPORT_SYMBOL_GPL(spi_bus_lock);
4003
4004/**
4005 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4006 * @ctlr: SPI bus master that was locked for exclusive bus access
4007 * Context: can sleep
4008 *
4009 * This call may only be used from a context that may sleep.  The sleep
4010 * is non-interruptible, and has no timeout.
4011 *
4012 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4013 * call.
4014 *
4015 * Return: always zero.
4016 */
4017int spi_bus_unlock(struct spi_controller *ctlr)
4018{
4019        ctlr->bus_lock_flag = 0;
4020
4021        mutex_unlock(&ctlr->bus_lock_mutex);
4022
4023        return 0;
4024}
4025EXPORT_SYMBOL_GPL(spi_bus_unlock);
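
/*
 * Example: a sketch of a sequence of messages that must not be
 * interleaved with traffic from other clients on the same bus.  The
 * two messages are assumed to have been prepared by the caller; only
 * the _locked transfer variants may be used while the bus is held.
 */
static int example_atomic_sequence(struct spi_device *spi,
                                   struct spi_message *first,
                                   struct spi_message *second)
{
        struct spi_controller *ctlr = spi->controller;
        int ret;

        spi_bus_lock(ctlr);

        ret = spi_sync_locked(spi, first);
        if (!ret)
                ret = spi_sync_locked(spi, second);

        spi_bus_unlock(ctlr);
        return ret;
}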
4026
4027/* portable code must never pass more than 32 bytes */
4028#define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
4029
4030static u8       *buf;
4031
4032/**
4033 * spi_write_then_read - SPI synchronous write followed by read
4034 * @spi: device with which data will be exchanged
4035 * @txbuf: data to be written (need not be dma-safe)
4036 * @n_tx: size of txbuf, in bytes
4037 * @rxbuf: buffer into which data will be read (need not be dma-safe)
4038 * @n_rx: size of rxbuf, in bytes
4039 * Context: can sleep
4040 *
4041 * This performs a half duplex MicroWire style transaction with the
4042 * device, sending txbuf and then reading rxbuf.  The return value
4043 * is zero for success, else a negative errno status code.
4044 * This call may only be used from a context that may sleep.
4045 *
4046 * Parameters to this routine are always copied using a small buffer.
4047 * Performance-sensitive or bulk transfer code should instead use
4048 * spi_{async,sync}() calls with dma-safe buffers.
4049 *
4050 * Return: zero on success, else a negative error code.
4051 */
4052int spi_write_then_read(struct spi_device *spi,
4053                const void *txbuf, unsigned n_tx,
4054                void *rxbuf, unsigned n_rx)
4055{
4056        static DEFINE_MUTEX(lock);
4057
4058        int                     status;
4059        struct spi_message      message;
4060        struct spi_transfer     x[2];
4061        u8                      *local_buf;
4062
4063        /* Use preallocated DMA-safe buffer if we can.  We can't avoid
4064         * copying here (as a pure convenience thing), but we can
4065         * keep heap costs out of the hot path unless someone else is
4066         * using the pre-allocated buffer or the transfer is too large.
4067         */
4068        if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4069                local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4070                                    GFP_KERNEL | GFP_DMA);
4071                if (!local_buf)
4072                        return -ENOMEM;
4073        } else {
4074                local_buf = buf;
4075        }
4076
4077        spi_message_init(&message);
4078        memset(x, 0, sizeof(x));
4079        if (n_tx) {
4080                x[0].len = n_tx;
4081                spi_message_add_tail(&x[0], &message);
4082        }
4083        if (n_rx) {
4084                x[1].len = n_rx;
4085                spi_message_add_tail(&x[1], &message);
4086        }
4087
4088        memcpy(local_buf, txbuf, n_tx);
4089        x[0].tx_buf = local_buf;
4090        x[1].rx_buf = local_buf + n_tx;
4091
4092        /* do the i/o */
4093        status = spi_sync(spi, &message);
4094        if (status == 0)
4095                memcpy(rxbuf, x[1].rx_buf, n_rx);
4096
4097        if (x[0].tx_buf == buf)
4098                mutex_unlock(&lock);
4099        else
4100                kfree(local_buf);
4101
4102        return status;
4103}
4104EXPORT_SYMBOL_GPL(spi_write_then_read);
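
/*
 * Example: a sketch of reading a 16-bit big-endian register through
 * the helper above, for a hypothetical chip whose read opcode is the
 * register address with the top bit set.
 */
static int example_read_reg16(struct spi_device *spi, u8 reg, u16 *val)
{
        u8 cmd = reg | 0x80;    /* hypothetical "read" opcode */
        __be16 raw;
        int status;

        /* txbuf/rxbuf need not be DMA-safe; the helper bounces them */
        status = spi_write_then_read(spi, &cmd, 1, &raw, 2);
        if (status)
                return status;

        *val = be16_to_cpu(raw);
        return 0;
}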
4105
4106/*-------------------------------------------------------------------------*/
4107
4108#if IS_ENABLED(CONFIG_OF)
4109/* must call put_device() when done with the returned spi_device */
4110struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4111{
4112        struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4113
4114        return dev ? to_spi_device(dev) : NULL;
4115}
4116EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
4117#endif /* IS_ENABLED(CONFIG_OF) */
4118
4119#if IS_ENABLED(CONFIG_OF_DYNAMIC)
4120/* the spi controllers are not on spi_bus, so we find them another way */
4121static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4122{
4123        struct device *dev;
4124
4125        dev = class_find_device_by_of_node(&spi_master_class, node);
4126        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4127                dev = class_find_device_by_of_node(&spi_slave_class, node);
4128        if (!dev)
4129                return NULL;
4130
4131        /* reference was taken in class_find_device */
4132        return container_of(dev, struct spi_controller, dev);
4133}
4134
4135static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4136                         void *arg)
4137{
4138        struct of_reconfig_data *rd = arg;
4139        struct spi_controller *ctlr;
4140        struct spi_device *spi;
4141
4142        switch (of_reconfig_get_state_change(action, arg)) {
4143        case OF_RECONFIG_CHANGE_ADD:
4144                ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4145                if (ctlr == NULL)
4146                        return NOTIFY_OK;       /* not for us */
4147
4148                if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4149                        put_device(&ctlr->dev);
4150                        return NOTIFY_OK;
4151                }
4152
4153                spi = of_register_spi_device(ctlr, rd->dn);
4154                put_device(&ctlr->dev);
4155
4156                if (IS_ERR(spi)) {
4157                        pr_err("%s: failed to create for '%pOF'\n",
4158                                        __func__, rd->dn);
4159                        of_node_clear_flag(rd->dn, OF_POPULATED);
4160                        return notifier_from_errno(PTR_ERR(spi));
4161                }
4162                break;
4163
4164        case OF_RECONFIG_CHANGE_REMOVE:
4165                /* already depopulated? */
4166                if (!of_node_check_flag(rd->dn, OF_POPULATED))
4167                        return NOTIFY_OK;
4168
4169                /* find our device by node */
4170                spi = of_find_spi_device_by_node(rd->dn);
4171                if (spi == NULL)
4172                        return NOTIFY_OK;       /* no? not meant for us */
4173
4174                /* unregister takes one ref away */
4175                spi_unregister_device(spi);
4176
4177                /* and put the reference of the find */
4178                put_device(&spi->dev);
4179                break;
4180        }
4181
4182        return NOTIFY_OK;
4183}
4184
4185static struct notifier_block spi_of_notifier = {
4186        .notifier_call = of_spi_notify,
4187};
4188#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4189extern struct notifier_block spi_of_notifier;
4190#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4191
4192#if IS_ENABLED(CONFIG_ACPI)
4193static int spi_acpi_controller_match(struct device *dev, const void *data)
4194{
4195        return ACPI_COMPANION(dev->parent) == data;
4196}
4197
4198static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4199{
4200        struct device *dev;
4201
4202        dev = class_find_device(&spi_master_class, NULL, adev,
4203                                spi_acpi_controller_match);
4204        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4205                dev = class_find_device(&spi_slave_class, NULL, adev,
4206                                        spi_acpi_controller_match);
4207        if (!dev)
4208                return NULL;
4209
4210        return container_of(dev, struct spi_controller, dev);
4211}
4212
4213static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4214{
4215        struct device *dev;
4216
4217        dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4218        return dev ? to_spi_device(dev) : NULL;
4219}
4220
4221static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4222                           void *arg)
4223{
4224        struct acpi_device *adev = arg;
4225        struct spi_controller *ctlr;
4226        struct spi_device *spi;
4227
4228        switch (value) {
4229        case ACPI_RECONFIG_DEVICE_ADD:
4230                ctlr = acpi_spi_find_controller_by_adev(adev->parent);
4231                if (!ctlr)
4232                        break;
4233
4234                acpi_register_spi_device(ctlr, adev);
4235                put_device(&ctlr->dev);
4236                break;
4237        case ACPI_RECONFIG_DEVICE_REMOVE:
4238                if (!acpi_device_enumerated(adev))
4239                        break;
4240
4241                spi = acpi_spi_find_device_by_adev(adev);
4242                if (!spi)
4243                        break;
4244
4245                spi_unregister_device(spi);
4246                put_device(&spi->dev);
4247                break;
4248        }
4249
4250        return NOTIFY_OK;
4251}
4252
4253static struct notifier_block spi_acpi_notifier = {
4254        .notifier_call = acpi_spi_notify,
4255};
4256#else
4257extern struct notifier_block spi_acpi_notifier;
4258#endif
4259
4260static int __init spi_init(void)
4261{
4262        int     status;
4263
4264        buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4265        if (!buf) {
4266                status = -ENOMEM;
4267                goto err0;
4268        }
4269
4270        status = bus_register(&spi_bus_type);
4271        if (status < 0)
4272                goto err1;
4273
4274        status = class_register(&spi_master_class);
4275        if (status < 0)
4276                goto err2;
4277
4278        if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4279                status = class_register(&spi_slave_class);
4280                if (status < 0)
4281                        goto err3;
4282        }
4283
4284        if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4285                WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4286        if (IS_ENABLED(CONFIG_ACPI))
4287                WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4288
4289        return 0;
4290
4291err3:
4292        class_unregister(&spi_master_class);
4293err2:
4294        bus_unregister(&spi_bus_type);
4295err1:
4296        kfree(buf);
4297        buf = NULL;
4298err0:
4299        return status;
4300}
4301
4302/* board_info is normally registered in arch_initcall(),
4303 * but even essential drivers wait till later.
4304 *
4305 * REVISIT only boardinfo really needs static linking.  The rest (device and
4306 * driver registration) _could_ be dynamically linked (modular) ... costs
4307 * include needing to have boardinfo data structures be much more public.
4308 */
4309postcore_initcall(spi_init);
4310