linux/drivers/spi/spi.c
/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
        struct spi_device       *spi = to_spi_device(dev);

        /* spi masters may cleanup for released devices */
        if (spi->master->cleanup)
                spi->master->cleanup(spi);

        spi_master_put(spi->master);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL,
};
ATTRIBUTE_GROUPS(spi_dev);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
        while (id->name[0]) {
                if (!strcmp(sdev->modalias, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
        return 0;
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        ret = dev_pm_domain_attach(dev, true);
        if (ret != -EPROBE_DEFER) {
                ret = sdrv->probe(to_spi_device(dev));
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static int spi_drv_remove(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = sdrv->remove(to_spi_device(dev));
        dev_pm_domain_detach(dev, true);

        return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);

        sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
        sdrv->driver.bus = &spi_bus_type;
        if (sdrv->probe)
                sdrv->driver.probe = spi_drv_probe;
        if (sdrv->remove)
                sdrv->driver.remove = spi_drv_remove;
        if (sdrv->shutdown)
                sdrv->driver.shutdown = spi_drv_shutdown;
        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
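
/*
 * Example (illustrative sketch; the "foo" names are hypothetical): a
 * minimal protocol driver using this call via the module_spi_driver()
 * helper from <linux/spi/spi.h>:
 *
 *        static int foo_probe(struct spi_device *spi)
 *        {
 *                return 0;
 *        }
 *
 *        static int foo_remove(struct spi_device *spi)
 *        {
 *                return 0;
 *        }
 *
 *        static struct spi_driver foo_driver = {
 *                .driver = {
 *                        .name = "foo",
 *                },
 *                .probe  = foo_probe,
 *                .remove = foo_remove,
 *        };
 *
 *        module_spi_driver(foo_driver);
 */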

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c, alongside other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
        struct spi_device       *spi;

        if (!spi_master_get(master))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_master_put(master);
                return NULL;
        }

        spi->master = master;
        spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;
        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
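
/*
 * Example (illustrative sketch; modalias and values are hypothetical):
 * an adapter driver that learns about a chip out-of-band can allocate,
 * fill and register it by hand.  On spi_add_device() failure the device
 * is discarded with spi_dev_put(), as described above.
 *
 *        struct spi_device *spi = spi_alloc_device(master);
 *
 *        if (!spi)
 *                return -ENOMEM;
 *        spi->chip_select = 0;
 *        spi->max_speed_hz = 1000000;
 *        spi->mode = SPI_MODE_0;
 *        strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *        if (spi_add_device(spi)) {
 *                spi_dev_put(spi);
 *                return -ENODEV;
 *        }
 */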

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->master == new_spi->master &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        static DEFINE_MUTEX(spi_add_lock);
        struct spi_master *master = spi->master;
        struct device *dev = master->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= master->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n",
                        spi->chip_select,
                        master->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /* We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.  Lock against concurrent add() calls.
         */
        mutex_lock(&spi_add_lock);

        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
                goto done;
        }

        if (master->cs_gpios)
                spi->cs_gpio = master->cs_gpios[spi->chip_select];

        /* Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                                dev_name(&spi->dev), status);
                goto done;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0)
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
        else
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
        mutex_unlock(&spi_add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
                                  struct spi_board_info *chip)
{
        struct spi_device       *proxy;
        int                     status;

        /* NOTE:  caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(master);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        status = spi_add_device(proxy);
        if (status < 0) {
                spi_dev_put(proxy);
                return NULL;
        }

        return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
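
/*
 * Example (illustrative sketch; the chip described is hypothetical):
 * spi_new_device() collapses the alloc/fill/add sequence above into a
 * single call driven by a descriptor:
 *
 *        struct spi_board_info chip = {
 *                .modalias     = "foo",
 *                .max_speed_hz = 1000000,
 *                .chip_select  = 0,
 *                .mode         = SPI_MODE_0,
 *        };
 *        struct spi_device *proxy = spi_new_device(master, &chip);
 *
 *        if (!proxy)
 *                return -ENODEV;
 */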

static void spi_match_master_to_boardinfo(struct spi_master *master,
                                struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (master->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(master, bi);
        if (!dev)
                dev_err(master->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return -EINVAL;

        bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_master *master;

                memcpy(&bi->board_info, info, sizeof(*info));
                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(master, &spi_master_list, list)
                        spi_match_master_to_boardinfo(master, &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
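
/*
 * Example (illustrative sketch; names and numbers are hypothetical): a
 * board file declaring one hard-wired device during early init:
 *
 *        static struct spi_board_info board_spi_devices[] __initdata = {
 *                {
 *                        .modalias     = "foo",
 *                        .max_speed_hz = 1000000,
 *                        .bus_num      = 0,
 *                        .chip_select  = 1,
 *                        .mode         = SPI_MODE_3,
 *                },
 *        };
 *
 *        static int __init board_init(void)
 *        {
 *                return spi_register_board_info(board_spi_devices,
 *                                ARRAY_SIZE(board_spi_devices));
 *        }
 *        arch_initcall(board_init);
 */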

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (spi->cs_gpio >= 0)
                gpio_set_value(spi->cs_gpio, !enable);
        else if (spi->master->set_cs)
                spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
        const int sgs = DIV_ROUND_UP(len, desc_len);
        struct page *vm_page;
        void *sg_buf;
        size_t min;
        int i, ret;

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        for (i = 0; i < sgs; i++) {
                min = min_t(size_t, len, desc_len);

                if (vmalloced_buf) {
                        vm_page = vmalloc_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(&sgt->sgl[i], vm_page,
                                    min, offset_in_page(buf));
                } else {
                        sg_buf = buf;
                        sg_set_buf(&sgt->sgl[i], sg_buf, min);
                }

                buf += min;
                len -= min;
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
                          struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!master->can_dma)
                return 0;

        tx_dev = master->dma_tx->device->dev;
        rx_dev = master->dma_rx->device->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        master->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!master->cur_msg_mapped || !master->can_dma)
                return 0;

        tx_dev = master->dma_tx->device->dev;
        rx_dev = master->dma_rx->device->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Reset tx_buf and rx_buf back to NULL if spi_map_msg()
                 * pointed them at the dummy buffers (i.e. they were
                 * originally NULL).
                 */
                if (xfer->tx_buf == master->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == master->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((master->flags & SPI_MASTER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((master->flags & SPI_MASTER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(master->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_tx = tmp;
                        memset(tmp, 0, max_tx);
                }

                if (max_rx) {
                        tmp = krealloc(master->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = master->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = master->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        unsigned long ms = 1;

        spi_set_cs(msg->spi, true);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                if (xfer->tx_buf || xfer->rx_buf) {
                        reinit_completion(&master->xfer_completion);

                        ret = master->transfer_one(master, msg->spi, xfer);
                        if (ret < 0) {
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = 0;
                                ms = xfer->len * 8 * 1000 / xfer->speed_hz;
                                ms += ms + 100; /* some tolerance */

                                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                                 msecs_to_jiffies(ms));
                        }

                        if (ms == 0) {
                                dev_err(&msg->spi->dev,
                                        "SPI transfer timed out\n");
                                msg->status = -ETIMEDOUT;
                        }
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                if (xfer->delay_usecs)
                        udelay(xfer->delay_usecs);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                spi_set_cs(msg->spi, false);
                                udelay(10);
                                spi_set_cs(msg->spi, true);
                        }
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && master->handle_err)
                master->handle_err(master, msg);

        spi_finalize_current_message(master);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
        complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
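
/*
 * Example (illustrative sketch; all "foo" helpers are hypothetical): a
 * controller driver pairing transfer_one() with
 * spi_finalize_current_transfer().  Returning 1 from transfer_one()
 * tells spi_transfer_one_message() above to wait on xfer_completion,
 * which the interrupt handler then completes:
 *
 *        static int foo_transfer_one(struct spi_master *master,
 *                                    struct spi_device *spi,
 *                                    struct spi_transfer *xfer)
 *        {
 *                foo_start_transfer_in_hardware(master, xfer);
 *                return 1;
 *        }
 *
 *        static irqreturn_t foo_irq(int irq, void *dev_id)
 *        {
 *                struct spi_master *master = dev_id;
 *
 *                if (!foo_transfer_done(master))
 *                        return IRQ_NONE;
 *
 *                spi_finalize_current_transfer(master);
 *                return IRQ_HANDLED;
 *        }
 */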

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
        unsigned long flags;
        bool was_busy = false;
        int ret;

        /* Lock queue */
        spin_lock_irqsave(&master->queue_lock, flags);

        /* Make sure we are not already running a message */
        if (master->cur_msg) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* If another context is idling the device then defer */
        if (master->idling) {
                queue_kthread_work(&master->kworker, &master->pump_messages);
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Check if the queue is idle */
        if (list_empty(&master->queue) || !master->running) {
                if (!master->busy) {
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        queue_kthread_work(&master->kworker,
                                           &master->pump_messages);
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                master->busy = false;
                master->idling = true;
                spin_unlock_irqrestore(&master->queue_lock, flags);

                kfree(master->dummy_rx);
                master->dummy_rx = NULL;
                kfree(master->dummy_tx);
                master->dummy_tx = NULL;
                if (master->unprepare_transfer_hardware &&
                    master->unprepare_transfer_hardware(master))
                        dev_err(&master->dev,
                                "failed to unprepare transfer hardware\n");
                if (master->auto_runtime_pm) {
                        pm_runtime_mark_last_busy(master->dev.parent);
                        pm_runtime_put_autosuspend(master->dev.parent);
                }
                trace_spi_master_idle(master);

                spin_lock_irqsave(&master->queue_lock, flags);
                master->idling = false;
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Extract head of queue */
        master->cur_msg =
                list_first_entry(&master->queue, struct spi_message, queue);

        list_del_init(&master->cur_msg->queue);
        if (master->busy)
                was_busy = true;
        else
                master->busy = true;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (!was_busy && master->auto_runtime_pm) {
                ret = pm_runtime_get_sync(master->dev.parent);
                if (ret < 0) {
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
                        return;
                }
        }

        if (!was_busy)
                trace_spi_master_busy(master);

        if (!was_busy && master->prepare_transfer_hardware) {
                ret = master->prepare_transfer_hardware(master);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare transfer hardware\n");

                        if (master->auto_runtime_pm)
                                pm_runtime_put(master->dev.parent);
                        return;
                }
        }

        trace_spi_message_start(master->cur_msg);

        if (master->prepare_message) {
                ret = master->prepare_message(master, master->cur_msg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare message: %d\n", ret);
                        master->cur_msg->status = ret;
                        spi_finalize_current_message(master);
                        return;
                }
                master->cur_msg_prepared = true;
        }

        ret = spi_map_msg(master, master->cur_msg);
        if (ret) {
                master->cur_msg->status = ret;
                spi_finalize_current_message(master);
                return;
        }

        ret = master->transfer_one_message(master, master->cur_msg);
        if (ret) {
                dev_err(&master->dev,
                        "failed to transfer one message from queue\n");
                return;
        }
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
        struct spi_master *master =
                container_of(work, struct spi_master, pump_messages);

        __spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

        master->running = false;
        master->busy = false;

        init_kthread_worker(&master->kworker);
        master->kworker_task = kthread_run(kthread_worker_fn,
                                           &master->kworker, "%s",
                                           dev_name(&master->dev));
        if (IS_ERR(master->kworker_task)) {
                dev_err(&master->dev, "failed to create message pump task\n");
                return PTR_ERR(master->kworker_task);
        }
        init_kthread_work(&master->pump_messages, spi_pump_messages);

        /*
         * Master config will indicate if this controller should run the
         * message pump with high (realtime) priority to reduce the transfer
         * latency on the bus by minimising the delay between a transfer
         * request and the scheduling of the message pump thread. Without this
         * setting the message pump thread will remain at default priority.
         */
        if (master->rt) {
                dev_info(&master->dev,
                        "will run message pump with realtime priority\n");
                sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
        }

        return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
        struct spi_message *next;
        unsigned long flags;

        /* get a pointer to the next message, if any */
        spin_lock_irqsave(&master->queue_lock, flags);
        next = list_first_entry_or_null(&master->queue, struct spi_message,
                                        queue);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
        struct spi_message *mesg;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&master->queue_lock, flags);
        mesg = master->cur_msg;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        spi_unmap_msg(master, mesg);

        if (master->cur_msg_prepared && master->unprepare_message) {
                ret = master->unprepare_message(master, mesg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to unprepare message: %d\n", ret);
                }
        }

        spin_lock_irqsave(&master->queue_lock, flags);
        master->cur_msg = NULL;
        master->cur_msg_prepared = false;
        queue_kthread_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        trace_spi_message_done(mesg);

        mesg->state = NULL;
        if (mesg->complete)
                mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
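
/*
 * Example (illustrative sketch; "foo" is hypothetical): a driver that
 * supplies its own transfer_one_message() must finalize each message
 * itself, just as the default implementation above does:
 *
 *        static int foo_transfer_one_message(struct spi_master *master,
 *                                            struct spi_message *msg)
 *        {
 *                msg->status = foo_do_transfers(master, msg);
 *                spi_finalize_current_message(master);
 *                return msg->status;
 *        }
 */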

static int spi_start_queue(struct spi_master *master)
{
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (master->running || master->busy) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -EBUSY;
        }

        master->running = true;
        master->cur_msg = NULL;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        queue_kthread_work(&master->kworker, &master->pump_messages);

        return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
        unsigned long flags;
        unsigned limit = 500;
        int ret = 0;

        spin_lock_irqsave(&master->queue_lock, flags);

        /*
         * This is a bit lame, but is optimized for the common execution path.
         * A wait_queue on the master->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead.
         */
        while ((!list_empty(&master->queue) || master->busy) && limit--) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                usleep_range(10000, 11000);
                spin_lock_irqsave(&master->queue_lock, flags);
        }

        if (!list_empty(&master->queue) || master->busy)
                ret = -EBUSY;
        else
                master->running = false;

        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (ret) {
                dev_warn(&master->dev,
                         "could not stop message queue\n");
                return ret;
        }
        return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
        int ret;

        ret = spi_stop_queue(master);

        /*
         * flush_kthread_worker will block until all work is done.
         * If the reason that stop_queue timed out is that the work will never
         * finish, then it does no good to call flush/stop thread, so
         * return anyway.
         */
        if (ret) {
                dev_err(&master->dev, "problem destroying queue\n");
                return ret;
        }

        flush_kthread_worker(&master->kworker);
        kthread_stop(master->kworker_task);

        return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
                                 struct spi_message *msg,
                                 bool need_pump)
{
        struct spi_master *master = spi->master;
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (!master->running) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -ESHUTDOWN;
        }
        msg->actual_length = 0;
        msg->status = -EINPROGRESS;

        list_add_tail(&msg->queue, &master->queue);
        if (!master->busy && need_pump)
                queue_kthread_work(&master->kworker, &master->pump_messages);

        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled; it is queued onto the
 *       driver's message queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
        return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
        int ret;

        master->transfer = spi_queued_transfer;
        if (!master->transfer_one_message)
                master->transfer_one_message = spi_transfer_one_message;

        /* Initialize and start queue */
        ret = spi_init_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem initializing queue\n");
                goto err_init_queue;
        }
        master->queued = true;
        ret = spi_start_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem starting queue\n");
                goto err_start_queue;
        }

        return 0;

err_start_queue:
        spi_destroy_queue(master);
err_init_queue:
        return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
        struct spi_device *spi;
        int rc;
        u32 value;

        /* Alloc an spi_device */
        spi = spi_alloc_device(master);
        if (!spi) {
                dev_err(&master->dev, "spi_device alloc error for %s\n",
                        nc->full_name);
                rc = -ENOMEM;
                goto err_out;
        }

        /* Select device driver */
        rc = of_modalias_node(nc, spi->modalias,
                                sizeof(spi->modalias));
        if (rc < 0) {
                dev_err(&master->dev, "cannot find modalias for %s\n",
                        nc->full_name);
                goto err_out;
        }

        /* Device address */
        rc = of_property_read_u32(nc, "reg", &value);
        if (rc) {
                dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
                        nc->full_name, rc);
                goto err_out;
        }
        spi->chip_select = value;

        /* Mode (clock phase/polarity/etc.) */
        if (of_find_property(nc, "spi-cpha", NULL))
                spi->mode |= SPI_CPHA;
        if (of_find_property(nc, "spi-cpol", NULL))
                spi->mode |= SPI_CPOL;
        if (of_find_property(nc, "spi-cs-high", NULL))
                spi->mode |= SPI_CS_HIGH;
        if (of_find_property(nc, "spi-3wire", NULL))
                spi->mode |= SPI_3WIRE;
        if (of_find_property(nc, "spi-lsb-first", NULL))
                spi->mode |= SPI_LSB_FIRST;

        /* Device DUAL/QUAD mode */
        if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
                switch (value) {
                case 1:
                        break;
                case 2:
                        spi->mode |= SPI_TX_DUAL;
                        break;
                case 4:
                        spi->mode |= SPI_TX_QUAD;
                        break;
                default:
                        dev_warn(&master->dev,
                                "spi-tx-bus-width %d not supported\n",
                                value);
                        break;
                }
        }

        if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
                switch (value) {
                case 1:
                        break;
                case 2:
                        spi->mode |= SPI_RX_DUAL;
                        break;
                case 4:
                        spi->mode |= SPI_RX_QUAD;
                        break;
                default:
                        dev_warn(&master->dev,
                                "spi-rx-bus-width %d not supported\n",
                                value);
                        break;
                }
        }

        /* Device speed */
        rc = of_property_read_u32(nc, "spi-max-frequency", &value);
        if (rc) {
                dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
                        nc->full_name, rc);
                goto err_out;
        }
        spi->max_speed_hz = value;

        /* IRQ */
        spi->irq = irq_of_parse_and_map(nc, 0);

        /* Store a pointer to the node in the device structure */
        of_node_get(nc);
        spi->dev.of_node = nc;

        /* Register the new device */
        rc = spi_add_device(spi);
        if (rc) {
                dev_err(&master->dev, "spi_device register error %s\n",
                        nc->full_name);
                goto err_out;
        }

        return spi;

err_out:
        spi_dev_put(spi);
        return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:     Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
        struct spi_device *spi;
        struct device_node *nc;

        if (!master->dev.of_node)
                return;

        for_each_available_child_of_node(master->dev.of_node, nc) {
                spi = of_register_spi_device(master, nc);
                if (IS_ERR(spi))
                        dev_warn(&master->dev, "Failed to create SPI device for %s\n",
                                nc->full_name);
        }
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
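
/*
 * Example (illustrative sketch; node names, compatible string and values
 * are hypothetical): a device tree fragment that of_register_spi_device()
 * above would turn into an spi_device with chip select 1, a 1 MHz limit,
 * and SPI_CPHA | SPI_CS_HIGH set in its mode:
 *
 *        &spi0 {
 *                foo@1 {
 *                        compatible = "acme,foo";
 *                        reg = <1>;
 *                        spi-max-frequency = <1000000>;
 *                        spi-cpha;
 *                        spi-cs-high;
 *                };
 *        };
 */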

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
        struct spi_device *spi = data;

        if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
                struct acpi_resource_spi_serialbus *sb;

                sb = &ares->data.spi_serial_bus;
                if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
                        spi->chip_select = sb->device_selection;
                        spi->max_speed_hz = sb->connection_speed;

                        if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
                                spi->mode |= SPI_CPHA;
                        if (sb->clock_polarity == ACPI_SPI_START_HIGH)
                                spi->mode |= SPI_CPOL;
                        if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
                                spi->mode |= SPI_CS_HIGH;
                }
        } else if (spi->irq < 0) {
                struct resource r;

                if (acpi_dev_resource_interrupt(ares, 0, &r))
                        spi->irq = r.start;
        }

        /* Always tell the ACPI core to skip this resource */
        return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
                                       void *data, void **return_value)
{
        struct spi_master *master = data;
        struct list_head resource_list;
        struct acpi_device *adev;
        struct spi_device *spi;
        int ret;

        if (acpi_bus_get_device(handle, &adev))
                return AE_OK;
        if (acpi_bus_get_status(adev) || !adev->status.present)
                return AE_OK;

        spi = spi_alloc_device(master);
        if (!spi) {
                dev_err(&master->dev, "failed to allocate SPI device for %s\n",
                        dev_name(&adev->dev));
                return AE_NO_MEMORY;
        }

        ACPI_COMPANION_SET(&spi->dev, adev);
        spi->irq = -1;

        INIT_LIST_HEAD(&resource_list);
        ret = acpi_dev_get_resources(adev, &resource_list,
                                     acpi_spi_add_resource, spi);
        acpi_dev_free_resource_list(&resource_list);

        if (ret < 0 || !spi->max_speed_hz) {
                spi_dev_put(spi);
                return AE_OK;
        }

        adev->power.flags.ignore_parent = true;
        strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
        if (spi_add_device(spi)) {
                adev->power.flags.ignore_parent = false;
                dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
                        dev_name(&adev->dev));
                spi_dev_put(spi);
        }

        return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
        acpi_status status;
        acpi_handle handle;

        handle = ACPI_HANDLE(master->dev.parent);
        if (!handle)
                return;

        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
                                     acpi_spi_add_device, NULL,
                                     master, NULL);
        if (ACPI_FAILURE(status))
                dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
        struct spi_master *master;

        master = container_of(dev, struct spi_master, dev);
        kfree(master);
}

static struct class spi_master_class = {
        .name           = "spi_master",
        .owner          = THIS_MODULE,
        .dev_release    = spi_master_release,
};

/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *      memory is in the driver_data field of the returned device,
 *      accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after
 * errors adding the device) calling spi_master_put() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
        struct spi_master       *master;

        if (!dev)
                return NULL;

        master = kzalloc(size + sizeof(*master), GFP_KERNEL);
        if (!master)
                return NULL;

        device_initialize(&master->dev);
        master->bus_num = -1;
        master->num_chipselect = 1;
        master->dev.class = &spi_master_class;
        master->dev.parent = get_device(dev);
        spi_master_set_devdata(master, &master[1]);

        return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
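
/*
 * Example (illustrative sketch; "struct foo" and "pdev" are
 * hypothetical): a controller driver allocating its master together
 * with driver-private data in one step:
 *
 *        struct spi_master *master;
 *        struct foo *priv;
 *
 *        master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *        if (!master)
 *                return -ENOMEM;
 *        priv = spi_master_get_devdata(master);
 *        master->bus_num = pdev->id;
 *        master->num_chipselect = 4;
 */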
1464
1465#ifdef CONFIG_OF
1466static int of_spi_register_master(struct spi_master *master)
1467{
1468        int nb, i, *cs;
1469        struct device_node *np = master->dev.of_node;
1470
1471        if (!np)
1472                return 0;
1473
1474        nb = of_gpio_named_count(np, "cs-gpios");
1475        master->num_chipselect = max_t(int, nb, master->num_chipselect);
1476
1477        /* Return error only for an incorrectly formed cs-gpios property */
1478        if (nb == 0 || nb == -ENOENT)
1479                return 0;
1480        else if (nb < 0)
1481                return nb;
1482
1483        cs = devm_kzalloc(&master->dev,
1484                          sizeof(int) * master->num_chipselect,
1485                          GFP_KERNEL);
1486        master->cs_gpios = cs;
1487
1488        if (!master->cs_gpios)
1489                return -ENOMEM;
1490
1491        for (i = 0; i < master->num_chipselect; i++)
1492                cs[i] = -ENOENT;
1493
1494        for (i = 0; i < nb; i++)
1495                cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1496
1497        return 0;
1498}
1499#else
1500static int of_spi_register_master(struct spi_master *master)
1501{
1502        return 0;
1503}
1504#endif
1505
1506/**
1507 * spi_register_master - register SPI master controller
1508 * @master: initialized master, originally from spi_alloc_master()
1509 * Context: can sleep
1510 *
1511 * SPI master controllers connect to their drivers using some non-SPI bus,
1512 * such as the platform bus.  The final stage of probe() in that code
1513 * includes calling spi_register_master() to hook up to this SPI bus glue.
1514 *
1515 * SPI controllers use board specific (often SOC specific) bus numbers,
1516 * and board-specific addressing for SPI devices combines those numbers
1517 * with chip select numbers.  Since SPI does not directly support dynamic
1518 * device identification, boards need configuration tables telling which
1519 * chip is at which address.
1520 *
1521 * This must be called from context that can sleep.  It returns zero on
1522 * success, else a negative error code (dropping the master's refcount).
1523 * After a successful return, the caller is responsible for calling
1524 * spi_unregister_master().
1525 */
1526int spi_register_master(struct spi_master *master)
1527{
1528        static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1529        struct device           *dev = master->dev.parent;
1530        struct boardinfo        *bi;
1531        int                     status = -ENODEV;
1532        int                     dynamic = 0;
1533
1534        if (!dev)
1535                return -ENODEV;
1536
1537        status = of_spi_register_master(master);
1538        if (status)
1539                return status;
1540
1541        /* even if it's just one always-selected device, there must
1542         * be at least one chipselect
1543         */
1544        if (master->num_chipselect == 0)
1545                return -EINVAL;
1546
1547        if ((master->bus_num < 0) && master->dev.of_node)
1548                master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1549
1550        /* convention:  dynamically assigned bus IDs count down from the max */
1551        if (master->bus_num < 0) {
1552                /* FIXME switch to an IDR based scheme, something like
1553                 * I2C now uses, so we can't run out of "dynamic" IDs
1554                 */
1555                master->bus_num = atomic_dec_return(&dyn_bus_id);
1556                dynamic = 1;
1557        }
1558
1559        INIT_LIST_HEAD(&master->queue);
1560        spin_lock_init(&master->queue_lock);
1561        spin_lock_init(&master->bus_lock_spinlock);
1562        mutex_init(&master->bus_lock_mutex);
1563        master->bus_lock_flag = 0;
1564        init_completion(&master->xfer_completion);
1565        if (!master->max_dma_len)
1566                master->max_dma_len = INT_MAX;
1567
1568        /* Register the device, then userspace will see it.
1569         * Registration fails if the bus ID is in use.
1570         */
1571        dev_set_name(&master->dev, "spi%u", master->bus_num);
1572        status = device_add(&master->dev);
1573        if (status < 0)
1574                goto done;
1575        dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1576                        dynamic ? " (dynamic)" : "");
1577
1578        /* If we're using a queued driver, start the queue */
1579        if (master->transfer)
1580                dev_info(dev, "master is unqueued, this is deprecated\n");
1581        else {
1582                status = spi_master_initialize_queue(master);
1583                if (status) {
1584                        device_del(&master->dev);
1585                        goto done;
1586                }
1587        }
1588
1589        mutex_lock(&board_lock);
1590        list_add_tail(&master->list, &spi_master_list);
1591        list_for_each_entry(bi, &board_list, list)
1592                spi_match_master_to_boardinfo(master, &bi->board_info);
1593        mutex_unlock(&board_lock);
1594
1595        /* Register devices from the device tree and ACPI */
1596        of_register_spi_devices(master);
1597        acpi_register_spi_devices(master);
1598done:
1599        return status;
1600}
1601EXPORT_SYMBOL_GPL(spi_register_master);
1602
1603static void devm_spi_unregister(struct device *dev, void *res)
1604{
1605        spi_unregister_master(*(struct spi_master **)res);
1606}
1607
1608/**
1609 * devm_spi_register_master - register managed SPI master controller
1610 * @dev:    device managing SPI master
1611 * @master: initialized master, originally from spi_alloc_master()
1612 * Context: can sleep
1613 *
1614 * Register a SPI master as with spi_register_master() which will
1615 * automatically be unregistered when @dev is unbound.
1616 */
1617int devm_spi_register_master(struct device *dev, struct spi_master *master)
1618{
1619        struct spi_master **ptr;
1620        int ret;
1621
1622        ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1623        if (!ptr)
1624                return -ENOMEM;
1625
1626        ret = spi_register_master(master);
1627        if (!ret) {
1628                *ptr = master;
1629                devres_add(dev, ptr);
1630        } else {
1631                devres_free(ptr);
1632        }
1633
1634        return ret;
1635}
1636EXPORT_SYMBOL_GPL(devm_spi_register_master);
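
/*
 * A minimal sketch of a controller driver's probe() using the managed
 * registration above.  The foo_* names and struct foo_priv are hypothetical,
 * and num_chipselect/mode_bits are arbitrary examples:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		int ret;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		master->num_chipselect = 4;
 *		master->mode_bits = SPI_CPOL | SPI_CPHA;
 *		master->transfer_one = foo_transfer_one;
 *		master->dev.of_node = pdev->dev.of_node;
 *
 *		ret = devm_spi_register_master(&pdev->dev, master);
 *		if (ret)
 *			spi_master_put(master);
 *		return ret;
 *	}
 */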
1637
1638static int __unregister(struct device *dev, void *null)
1639{
1640        spi_unregister_device(to_spi_device(dev));
1641        return 0;
1642}
1643
1644/**
1645 * spi_unregister_master - unregister SPI master controller
1646 * @master: the master being unregistered
1647 * Context: can sleep
1648 *
1649 * This call is used only by SPI master controller drivers, which are the
1650 * only ones directly touching chip registers.
1651 *
1652 * This must be called from context that can sleep.
1653 */
1654void spi_unregister_master(struct spi_master *master)
1655{
1656        int dummy;
1657
1658        if (master->queued) {
1659                if (spi_destroy_queue(master))
1660                        dev_err(&master->dev, "queue remove failed\n");
1661        }
1662
1663        mutex_lock(&board_lock);
1664        list_del(&master->list);
1665        mutex_unlock(&board_lock);
1666
1667        dummy = device_for_each_child(&master->dev, NULL, __unregister);
1668        device_unregister(&master->dev);
1669}
1670EXPORT_SYMBOL_GPL(spi_unregister_master);
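
/*
 * For a non-managed registration, the matching remove() is roughly this
 * sketch (foo_* names hypothetical; probe() is assumed to have called
 * platform_set_drvdata(pdev, master)):
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct spi_master *master = platform_get_drvdata(pdev);
 *
 *		spi_unregister_master(master);
 *		return 0;
 *	}
 */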
1671
1672int spi_master_suspend(struct spi_master *master)
1673{
1674        int ret;
1675
1676        /* Basically no-ops for non-queued masters */
1677        if (!master->queued)
1678                return 0;
1679
1680        ret = spi_stop_queue(master);
1681        if (ret)
1682                dev_err(&master->dev, "queue stop failed\n");
1683
1684        return ret;
1685}
1686EXPORT_SYMBOL_GPL(spi_master_suspend);
1687
1688int spi_master_resume(struct spi_master *master)
1689{
1690        int ret;
1691
1692        if (!master->queued)
1693                return 0;
1694
1695        ret = spi_start_queue(master);
1696        if (ret)
1697                dev_err(&master->dev, "queue restart failed\n");
1698
1699        return ret;
1700}
1701EXPORT_SYMBOL_GPL(spi_master_resume);
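
/*
 * Controller drivers typically hook the two queue helpers above into their
 * dev_pm_ops, roughly as in this sketch (foo_* names hypothetical; a real
 * driver would also quiesce and reinitialize its own hardware):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return spi_master_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return spi_master_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */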
1702
1703static int __spi_master_match(struct device *dev, const void *data)
1704{
1705        struct spi_master *m;
1706        const u16 *bus_num = data;
1707
1708        m = container_of(dev, struct spi_master, dev);
1709        return m->bus_num == *bus_num;
1710}
1711
1712/**
1713 * spi_busnum_to_master - look up master associated with bus_num
1714 * @bus_num: the master's bus number
1715 * Context: can sleep
1716 *
1717 * This call may be used with devices that are registered after
1718 * arch init time.  It returns a refcounted pointer to the relevant
1719 * spi_master (which the caller must release), or NULL if there is
1720 * no such master registered.
1721 */
1722struct spi_master *spi_busnum_to_master(u16 bus_num)
1723{
1724        struct device           *dev;
1725        struct spi_master       *master = NULL;
1726
1727        dev = class_find_device(&spi_master_class, NULL, &bus_num,
1728                                __spi_master_match);
1729        if (dev)
1730                master = container_of(dev, struct spi_master, dev);
1731        /* reference got in class_find_device */
1732        return master;
1733}
1734EXPORT_SYMBOL_GPL(spi_busnum_to_master);
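
/*
 * Illustrative lookup (bus number 1 is an arbitrary example); the caller
 * owns the reference taken by class_find_device() and must drop it:
 *
 *	struct spi_master *master = spi_busnum_to_master(1);
 *
 *	if (master) {
 *		...
 *		spi_master_put(master);
 *	}
 */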
1735
1736
1737/*-------------------------------------------------------------------------*/
1738
1739/* Core methods for SPI master protocol drivers.  Some of the
1740 * other core methods are currently defined as inline functions.
1741 */
1742
1743/**
1744 * spi_setup - setup SPI mode and clock rate
1745 * @spi: the device whose settings are being modified
1746 * Context: can sleep, and no requests are queued to the device
1747 *
1748 * SPI protocol drivers may need to update the transfer mode if the
1749 * device doesn't work with its default.  They may likewise need
1750 * to update clock rates or word sizes from initial values.  This function
1751 * changes those settings, and must be called from a context that can sleep.
1752 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
1753 * effect the next time the device is selected and data is transferred to
1754 * or from it.  When this function returns, the spi device is deselected.
1755 *
1756 * Note that this call will fail if the protocol driver specifies an option
1757 * that the underlying controller or its driver does not support.  For
1758 * example, not all hardware supports wire transfers using nine bit words,
1759 * LSB-first wire encoding, or active-high chipselects.
1760 */
1761int spi_setup(struct spi_device *spi)
1762{
1763        unsigned        bad_bits, ugly_bits;
1764        int             status = 0;
1765
1766        /* Check mode to prevent DUAL and QUAD being set at the same time
1767         */
1768        if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
1769                ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
1770                dev_err(&spi->dev,
1771                "setup: cannot select dual and quad at the same time\n");
1772                return -EINVAL;
1773        }
1774        /* In SPI_3WIRE mode, DUAL and QUAD are forbidden
1775         */
1776        if ((spi->mode & SPI_3WIRE) && (spi->mode &
1777                (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
1778                return -EINVAL;
1779        /* help drivers fail *cleanly* when they need options
1780         * that aren't supported with their current master
1781         */
1782        bad_bits = spi->mode & ~spi->master->mode_bits;
1783        ugly_bits = bad_bits &
1784                    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
1785        if (ugly_bits) {
1786                dev_warn(&spi->dev,
1787                         "setup: ignoring unsupported mode bits %x\n",
1788                         ugly_bits);
1789                spi->mode &= ~ugly_bits;
1790                bad_bits &= ~ugly_bits;
1791        }
1792        if (bad_bits) {
1793                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
1794                        bad_bits);
1795                return -EINVAL;
1796        }
1797
1798        if (!spi->bits_per_word)
1799                spi->bits_per_word = 8;
1800
1801        if (!spi->max_speed_hz)
1802                spi->max_speed_hz = spi->master->max_speed_hz;
1803
1804        spi_set_cs(spi, false);
1805
1806        if (spi->master->setup)
1807                status = spi->master->setup(spi);
1808
1809        dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
1810                        (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
1811                        (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
1812                        (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
1813                        (spi->mode & SPI_3WIRE) ? "3wire, " : "",
1814                        (spi->mode & SPI_LOOP) ? "loopback, " : "",
1815                        spi->bits_per_word, spi->max_speed_hz,
1816                        status);
1817
1818        return status;
1819}
1820EXPORT_SYMBOL_GPL(spi_setup);
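
/*
 * A minimal sketch of a protocol driver adjusting its device from probe();
 * the mode, word size and speed values are examples only:
 *
 *	static int bar_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		return spi_setup(spi);
 *	}
 */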
1821
1822static int __spi_validate(struct spi_device *spi, struct spi_message *message)
1823{
1824        struct spi_master *master = spi->master;
1825        struct spi_transfer *xfer;
1826        int w_size;
1827
1828        if (list_empty(&message->transfers))
1829                return -EINVAL;
1830
1831        /* Half-duplex links include original MicroWire, and ones with
1832         * only one data pin like SPI_3WIRE (switches direction) or where
1833         * either MOSI or MISO is missing.  They can also be caused by
1834         * software limitations.
1835         */
1836        if ((master->flags & SPI_MASTER_HALF_DUPLEX)
1837                        || (spi->mode & SPI_3WIRE)) {
1838                unsigned flags = master->flags;
1839
1840                list_for_each_entry(xfer, &message->transfers, transfer_list) {
1841                        if (xfer->rx_buf && xfer->tx_buf)
1842                                return -EINVAL;
1843                        if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
1844                                return -EINVAL;
1845                        if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
1846                                return -EINVAL;
1847                }
1848        }
1849
1850        /*
1851         * Set transfer bits_per_word and max speed to the spi device
1852         * defaults if they are not set for this transfer.
1853         * Set transfer tx_nbits and rx_nbits to the single transfer
1854         * default (SPI_NBITS_SINGLE) if they are not set.
1855         */
1856        list_for_each_entry(xfer, &message->transfers, transfer_list) {
1857                message->frame_length += xfer->len;
1858                if (!xfer->bits_per_word)
1859                        xfer->bits_per_word = spi->bits_per_word;
1860
1861                if (!xfer->speed_hz)
1862                        xfer->speed_hz = spi->max_speed_hz;
1863
1864                if (master->max_speed_hz &&
1865                    xfer->speed_hz > master->max_speed_hz)
1866                        xfer->speed_hz = master->max_speed_hz;
1867
1868                if (master->bits_per_word_mask) {
1869                        /* Only 32 bits fit in the mask */
1870                        if (xfer->bits_per_word > 32)
1871                                return -EINVAL;
1872                        if (!(master->bits_per_word_mask &
1873                                        BIT(xfer->bits_per_word - 1)))
1874                                return -EINVAL;
1875                }
1876
1877                /*
1878                 * SPI transfer length must be a multiple of the SPI word
1879                 * size, rounded up to a power-of-two number of bytes.
1880                 */
1881                if (xfer->bits_per_word <= 8)
1882                        w_size = 1;
1883                else if (xfer->bits_per_word <= 16)
1884                        w_size = 2;
1885                else
1886                        w_size = 4;
1887
1888                /* No partial transfers accepted */
1889                if (xfer->len % w_size)
1890                        return -EINVAL;
1891
1892                if (xfer->speed_hz && master->min_speed_hz &&
1893                    xfer->speed_hz < master->min_speed_hz)
1894                        return -EINVAL;
1895
1896                if (xfer->tx_buf && !xfer->tx_nbits)
1897                        xfer->tx_nbits = SPI_NBITS_SINGLE;
1898                if (xfer->rx_buf && !xfer->rx_nbits)
1899                        xfer->rx_nbits = SPI_NBITS_SINGLE;
1900                /* check transfer tx/rx_nbits:
1901                 * 1. check the value matches one of single, dual and quad
1902                 * 2. check tx/rx_nbits match the mode in spi_device
1903                 */
1904                if (xfer->tx_buf) {
1905                        if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
1906                                xfer->tx_nbits != SPI_NBITS_DUAL &&
1907                                xfer->tx_nbits != SPI_NBITS_QUAD)
1908                                return -EINVAL;
1909                        if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
1910                                !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
1911                                return -EINVAL;
1912                        if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
1913                                !(spi->mode & SPI_TX_QUAD))
1914                                return -EINVAL;
1915                }
1916                /* check transfer rx_nbits */
1917                if (xfer->rx_buf) {
1918                        if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
1919                                xfer->rx_nbits != SPI_NBITS_DUAL &&
1920                                xfer->rx_nbits != SPI_NBITS_QUAD)
1921                                return -EINVAL;
1922                        if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
1923                                !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
1924                                return -EINVAL;
1925                        if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
1926                                !(spi->mode & SPI_RX_QUAD))
1927                                return -EINVAL;
1928                }
1929        }
1930
1931        message->status = -EINPROGRESS;
1932
1933        return 0;
1934}
1935
1936static int __spi_async(struct spi_device *spi, struct spi_message *message)
1937{
1938        struct spi_master *master = spi->master;
1939
1940        message->spi = spi;
1941
1942        trace_spi_message_submit(message);
1943
1944        return master->transfer(spi, message);
1945}
1946
1947/**
1948 * spi_async - asynchronous SPI transfer
1949 * @spi: device with which data will be exchanged
1950 * @message: describes the data transfers, including completion callback
1951 * Context: any (irqs may be blocked, etc)
1952 *
1953 * This call may be used in in_irq() and other contexts which can't sleep,
1954 * as well as from task contexts which can sleep.
1955 *
1956 * The completion callback is invoked in a context which can't sleep.
1957 * Before that invocation, the value of message->status is undefined.
1958 * When the callback is issued, message->status holds either zero (to
1959 * indicate complete success) or a negative error code.  After that
1960 * callback returns, the driver which issued the transfer request may
1961 * deallocate the associated memory; it's no longer in use by any SPI
1962 * core or controller driver code.
1963 *
1964 * Note that although all messages to a spi_device are handled in
1965 * FIFO order, messages may go to different devices in other orders.
1966 * Some device might be higher priority, or have various "hard" access
1967 * time requirements, for example.
1968 *
1969 * On detection of any fault during the transfer, processing of
1970 * the entire message is aborted, and the device is deselected.
1971 * Until returning from the associated message completion callback,
1972 * no other spi_message queued to that device will be processed.
1973 * (This rule applies equally to all the synchronous transfer calls,
1974 * which are wrappers around this core asynchronous primitive.)
1975 */
1976int spi_async(struct spi_device *spi, struct spi_message *message)
1977{
1978        struct spi_master *master = spi->master;
1979        int ret;
1980        unsigned long flags;
1981
1982        ret = __spi_validate(spi, message);
1983        if (ret != 0)
1984                return ret;
1985
1986        spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1987
1988        if (master->bus_lock_flag)
1989                ret = -EBUSY;
1990        else
1991                ret = __spi_async(spi, message);
1992
1993        spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1994
1995        return ret;
1996}
1997EXPORT_SYMBOL_GPL(spi_async);
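
/*
 * Sketch of an asynchronous submission.  The message, transfer and buffers
 * must stay allocated (and the buffers DMA-safe) until the completion
 * callback runs; bar_complete and the ctx structure holding the storage
 * are hypothetical:
 *
 *	static void bar_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	spi_message_init(&ctx->msg);
 *	ctx->xfer.tx_buf = ctx->tx;
 *	ctx->xfer.rx_buf = ctx->rx;
 *	ctx->xfer.len = sizeof(ctx->tx);
 *	spi_message_add_tail(&ctx->xfer, &ctx->msg);
 *	ctx->msg.complete = bar_complete;
 *	ctx->msg.context = &ctx->done;
 *	status = spi_async(spi, &ctx->msg);
 */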
1998
1999/**
2000 * spi_async_locked - version of spi_async with exclusive bus usage
2001 * @spi: device with which data will be exchanged
2002 * @message: describes the data transfers, including completion callback
2003 * Context: any (irqs may be blocked, etc)
2004 *
2005 * This call may be used in in_irq() and other contexts which can't sleep,
2006 * as well as from task contexts which can sleep.
2007 *
2008 * The completion callback is invoked in a context which can't sleep.
2009 * Before that invocation, the value of message->status is undefined.
2010 * When the callback is issued, message->status holds either zero (to
2011 * indicate complete success) or a negative error code.  After that
2012 * callback returns, the driver which issued the transfer request may
2013 * deallocate the associated memory; it's no longer in use by any SPI
2014 * core or controller driver code.
2015 *
2016 * Note that although all messages to a spi_device are handled in
2017 * FIFO order, messages may go to different devices in other orders.
2018 * Some device might be higher priority, or have various "hard" access
2019 * time requirements, for example.
2020 *
2021 * On detection of any fault during the transfer, processing of
2022 * the entire message is aborted, and the device is deselected.
2023 * Until returning from the associated message completion callback,
2024 * no other spi_message queued to that device will be processed.
2025 * (This rule applies equally to all the synchronous transfer calls,
2026 * which are wrappers around this core asynchronous primitive.)
2027 */
2028int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2029{
2030        struct spi_master *master = spi->master;
2031        int ret;
2032        unsigned long flags;
2033
2034        ret = __spi_validate(spi, message);
2035        if (ret != 0)
2036                return ret;
2037
2038        spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2039
2040        ret = __spi_async(spi, message);
2041
2042        spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2043
2044        return ret;
2045
2046}
2047EXPORT_SYMBOL_GPL(spi_async_locked);
2048
2049
2050/*-------------------------------------------------------------------------*/
2051
2052/* Utility methods for SPI master protocol drivers, layered on
2053 * top of the core.  Some other utility methods are defined as
2054 * inline functions.
2055 */
2056
2057static void spi_complete(void *arg)
2058{
2059        complete(arg);
2060}
2061
2062static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2063                      int bus_locked)
2064{
2065        DECLARE_COMPLETION_ONSTACK(done);
2066        int status;
2067        struct spi_master *master = spi->master;
2068        unsigned long flags;
2069
2070        status = __spi_validate(spi, message);
2071        if (status != 0)
2072                return status;
2073
2074        message->complete = spi_complete;
2075        message->context = &done;
2076        message->spi = spi;
2077
2078        if (!bus_locked)
2079                mutex_lock(&master->bus_lock_mutex);
2080
2081        /* If we're not using the legacy transfer method then we will
2082         * try to transfer in the calling context, so this is a special
2083         * case.  This code would be less tricky if we could remove
2084         * support for driver implemented message queues.
2085         */
2086        if (master->transfer == spi_queued_transfer) {
2087                spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2088
2089                trace_spi_message_submit(message);
2090
2091                status = __spi_queued_transfer(spi, message, false);
2092
2093                spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2094        } else {
2095                status = spi_async_locked(spi, message);
2096        }
2097
2098        if (!bus_locked)
2099                mutex_unlock(&master->bus_lock_mutex);
2100
2101        if (status == 0) {
2102                /* Push out the messages in the calling context if we
2103                 * can.
2104                 */
2105                if (master->transfer == spi_queued_transfer)
2106                        __spi_pump_messages(master, false);
2107
2108                wait_for_completion(&done);
2109                status = message->status;
2110        }
2111        message->context = NULL;
2112        return status;
2113}
2114
2115/**
2116 * spi_sync - blocking/synchronous SPI data transfers
2117 * @spi: device with which data will be exchanged
2118 * @message: describes the data transfers
2119 * Context: can sleep
2120 *
2121 * This call may only be used from a context that may sleep.  The sleep
2122 * is non-interruptible, and has no timeout.  Low-overhead controller
2123 * drivers may DMA directly into and out of the message buffers.
2124 *
2125 * Note that the SPI device's chip select is active during the message,
2126 * and then is normally disabled between messages.  Drivers for some
2127 * frequently-used devices may want to minimize costs of selecting a chip,
2128 * by leaving it selected in anticipation that the next message will go
2129 * to the same chip.  (That may increase power usage.)
2130 *
2131 * Also, the caller is guaranteeing that the memory associated with the
2132 * message will not be freed before this call returns.
2133 *
2134 * It returns zero on success, else a negative error code.
2135 */
2136int spi_sync(struct spi_device *spi, struct spi_message *message)
2137{
2138        return __spi_sync(spi, message, 0);
2139}
2140EXPORT_SYMBOL_GPL(spi_sync);
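
/*
 * Sketch of a synchronous full-duplex exchange; tx and rx are assumed to
 * be DMA-safe buffers (e.g. from kmalloc()) of len bytes each:
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= tx,
 *		.rx_buf	= rx,
 *		.len	= len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_sync(spi, &m);
 */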
2141
2142/**
2143 * spi_sync_locked - version of spi_sync with exclusive bus usage
2144 * @spi: device with which data will be exchanged
2145 * @message: describes the data transfers
2146 * Context: can sleep
2147 *
2148 * This call may only be used from a context that may sleep.  The sleep
2149 * is non-interruptible, and has no timeout.  Low-overhead controller
2150 * drivers may DMA directly into and out of the message buffers.
2151 *
2152 * This call should be used by drivers that require exclusive access to the
2153 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2154 * be released by a spi_bus_unlock call when the exclusive access is over.
2155 *
2156 * It returns zero on success, else a negative error code.
2157 */
2158int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2159{
2160        return __spi_sync(spi, message, 1);
2161}
2162EXPORT_SYMBOL_GPL(spi_sync_locked);
2163
2164/**
2165 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2166 * @master: SPI bus master that should be locked for exclusive bus access
2167 * Context: can sleep
2168 *
2169 * This call may only be used from a context that may sleep.  The sleep
2170 * is non-interruptible, and has no timeout.
2171 *
2172 * This call should be used by drivers that require exclusive access to the
2173 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2174 * exclusive access is over. Data transfer must be done by spi_sync_locked
2175 * and spi_async_locked calls when the SPI bus lock is held.
2176 *
2177 * It returns zero on success, else a negative error code.
2178 */
2179int spi_bus_lock(struct spi_master *master)
2180{
2181        unsigned long flags;
2182
2183        mutex_lock(&master->bus_lock_mutex);
2184
2185        spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2186        master->bus_lock_flag = 1;
2187        spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2188
2189        /* mutex remains locked until spi_bus_unlock is called */
2190
2191        return 0;
2192}
2193EXPORT_SYMBOL_GPL(spi_bus_lock);
2194
2195/**
2196 * spi_bus_unlock - release the lock for exclusive SPI bus usage
2197 * @master: SPI bus master that was locked for exclusive bus access
2198 * Context: can sleep
2199 *
2200 * This call may only be used from a context that may sleep.  The sleep
2201 * is non-interruptible, and has no timeout.
2202 *
2203 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2204 * call.
2205 *
2206 * It returns zero on success, else a negative error code.
2207 */
2208int spi_bus_unlock(struct spi_master *master)
2209{
2210        master->bus_lock_flag = 0;
2211
2212        mutex_unlock(&master->bus_lock_mutex);
2213
2214        return 0;
2215}
2216EXPORT_SYMBOL_GPL(spi_bus_unlock);
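
/*
 * Typical use of the bus lock: issuing several messages back to back with
 * no unrelated traffic in between (sketch; msg1 and msg2 are assumed to be
 * initialized spi_messages):
 *
 *	spi_bus_lock(spi->master);
 *	status = spi_sync_locked(spi, &msg1);
 *	if (status == 0)
 *		status = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->master);
 */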
2217
2218/* portable code must never pass more than 32 bytes */
2219#define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
2220
2221static u8       *buf;
2222
2223/**
2224 * spi_write_then_read - SPI synchronous write followed by read
2225 * @spi: device with which data will be exchanged
2226 * @txbuf: data to be written (need not be dma-safe)
2227 * @n_tx: size of txbuf, in bytes
2228 * @rxbuf: buffer into which data will be read (need not be dma-safe)
2229 * @n_rx: size of rxbuf, in bytes
2230 * Context: can sleep
2231 *
2232 * This performs a half duplex MicroWire style transaction with the
2233 * device, sending txbuf and then reading rxbuf.  The return value
2234 * is zero for success, else a negative errno status code.
2235 * This call may only be used from a context that may sleep.
2236 *
2237 * Parameters to this routine are always copied using a small buffer;
2238 * portable code should never use this for more than 32 bytes.
2239 * Performance-sensitive or bulk transfer code should instead use
2240 * spi_{async,sync}() calls with dma-safe buffers.
2241 */
2242int spi_write_then_read(struct spi_device *spi,
2243                const void *txbuf, unsigned n_tx,
2244                void *rxbuf, unsigned n_rx)
2245{
2246        static DEFINE_MUTEX(lock);
2247
2248        int                     status;
2249        struct spi_message      message;
2250        struct spi_transfer     x[2];
2251        u8                      *local_buf;
2252
2253        /* Use preallocated DMA-safe buffer if we can.  We can't avoid
2254         * copying here (as a pure convenience thing), but we can
2255         * keep heap costs out of the hot path unless someone else is
2256         * using the pre-allocated buffer or the transfer is too large.
2257         */
2258        if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
2259                local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
2260                                    GFP_KERNEL | GFP_DMA);
2261                if (!local_buf)
2262                        return -ENOMEM;
2263        } else {
2264                local_buf = buf;
2265        }
2266
2267        spi_message_init(&message);
2268        memset(x, 0, sizeof(x));
2269        if (n_tx) {
2270                x[0].len = n_tx;
2271                spi_message_add_tail(&x[0], &message);
2272        }
2273        if (n_rx) {
2274                x[1].len = n_rx;
2275                spi_message_add_tail(&x[1], &message);
2276        }
2277
2278        memcpy(local_buf, txbuf, n_tx);
2279        x[0].tx_buf = local_buf;
2280        x[1].rx_buf = local_buf + n_tx;
2281
2282        /* do the i/o */
2283        status = spi_sync(spi, &message);
2284        if (status == 0)
2285                memcpy(rxbuf, x[1].rx_buf, n_rx);
2286
2287        if (x[0].tx_buf == buf)
2288                mutex_unlock(&lock);
2289        else
2290                kfree(local_buf);
2291
2292        return status;
2293}
2294EXPORT_SYMBOL_GPL(spi_write_then_read);
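
/*
 * Sketch of a typical register read built on this helper; the command byte
 * 0x0f is an arbitrary example.  The buffers may live on the stack, since
 * the helper copies through its own DMA-safe buffer:
 *
 *	u8 cmd = 0x0f;
 *	u8 val;
 *	int status;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */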
2295
2296/*-------------------------------------------------------------------------*/
2297
2298#if IS_ENABLED(CONFIG_OF_DYNAMIC)
2299static int __spi_of_device_match(struct device *dev, void *data)
2300{
2301        return dev->of_node == data;
2302}
2303
2304/* the caller must call put_device() when done with the returned spi_device */
2305static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
2306{
2307        struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
2308                                                __spi_of_device_match);
2309        return dev ? to_spi_device(dev) : NULL;
2310}
2311
2312static int __spi_of_master_match(struct device *dev, const void *data)
2313{
2314        return dev->of_node == data;
2315}
2316
2317/* the spi masters are not using spi_bus, so we have to find them another way */
2318static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
2319{
2320        struct device *dev;
2321
2322        dev = class_find_device(&spi_master_class, NULL, node,
2323                                __spi_of_master_match);
2324        if (!dev)
2325                return NULL;
2326
2327        /* reference got in class_find_device */
2328        return container_of(dev, struct spi_master, dev);
2329}
2330
2331static int of_spi_notify(struct notifier_block *nb, unsigned long action,
2332                         void *arg)
2333{
2334        struct of_reconfig_data *rd = arg;
2335        struct spi_master *master;
2336        struct spi_device *spi;
2337
2338        switch (of_reconfig_get_state_change(action, arg)) {
2339        case OF_RECONFIG_CHANGE_ADD:
2340                master = of_find_spi_master_by_node(rd->dn->parent);
2341                if (master == NULL)
2342                        return NOTIFY_OK;       /* not for us */
2343
2344                spi = of_register_spi_device(master, rd->dn);
2345                put_device(&master->dev);
2346
2347                if (IS_ERR(spi)) {
2348                        pr_err("%s: failed to create SPI device for '%s'\n",
2349                                        __func__, rd->dn->full_name);
2350                        return notifier_from_errno(PTR_ERR(spi));
2351                }
2352                break;
2353
2354        case OF_RECONFIG_CHANGE_REMOVE:
2355                /* find our device by node */
2356                spi = of_find_spi_device_by_node(rd->dn);
2357                if (spi == NULL)
2358                        return NOTIFY_OK;       /* not meant for us */
2359
2360                /* unregister takes one ref away */
2361                spi_unregister_device(spi);
2362
2363                /* and put the reference of the find */
2364                put_device(&spi->dev);
2365                break;
2366        }
2367
2368        return NOTIFY_OK;
2369}
2370
2371static struct notifier_block spi_of_notifier = {
2372        .notifier_call = of_spi_notify,
2373};
2374#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2375extern struct notifier_block spi_of_notifier;
2376#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2377
2378static int __init spi_init(void)
2379{
2380        int     status;
2381
2382        buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
2383        if (!buf) {
2384                status = -ENOMEM;
2385                goto err0;
2386        }
2387
2388        status = bus_register(&spi_bus_type);
2389        if (status < 0)
2390                goto err1;
2391
2392        status = class_register(&spi_master_class);
2393        if (status < 0)
2394                goto err2;
2395
2396        if (IS_ENABLED(CONFIG_OF_DYNAMIC))
2397                WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
2398
2399        return 0;
2400
2401err2:
2402        bus_unregister(&spi_bus_type);
2403err1:
2404        kfree(buf);
2405        buf = NULL;
2406err0:
2407        return status;
2408}
2409
2410/* board_info is normally registered in arch_initcall(),
2411 * but even essential drivers wait till later.
2412 *
2413 * REVISIT only boardinfo really needs static linking.  The rest (device and
2414 * driver registration) _could_ be dynamically linked (modular) ... costs
2415 * include needing to have boardinfo data structures be much more public.
2416 */
2417postcore_initcall(spi_init);
2418
2419