linux/drivers/spi/spidev.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *      Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/acpi.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <linux/uaccess.h>


/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
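
/*
 * For reference, a minimal userspace sketch of a single full-duplex
 * transfer through this interface (illustrative only: the device node
 * name, transfer size and data bytes are assumptions for the example,
 * and error handling is reduced to early returns):
 *
 *      #include <fcntl.h>
 *      #include <stdint.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/spi/spidev.h>
 *
 *      int spidev_example(void)
 *      {
 *              uint8_t tx[4] = { 0x90, 0x00, 0x00, 0x00 };
 *              uint8_t rx[4] = { 0 };
 *              struct spi_ioc_transfer xfer;
 *              int fd = open("/dev/spidev0.0", O_RDWR);
 *
 *              if (fd < 0)
 *                      return -1;
 *              memset(&xfer, 0, sizeof(xfer));
 *              xfer.tx_buf = (uintptr_t)tx;
 *              xfer.rx_buf = (uintptr_t)rx;
 *              xfer.len = sizeof(tx);
 *              return ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
 *      }
 */
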
#define SPIDEV_MAJOR                    153     /* assigned */
#define N_SPI_MINORS                    32      /* ... up to 256 */

static DECLARE_BITMAP(minors, N_SPI_MINORS);


/* Bit masks for spi_device.mode management.  Note that incorrect
 * values for some of these settings can cause *lots* of trouble for other
 * devices on a shared bus:
 *
 *  - CS_HIGH ... this device will be active when it shouldn't be
 *  - 3WIRE ... when active, it won't behave as it should
 *  - NO_CS ... there will be no explicit message boundaries; this
 *      is completely incompatible with the shared bus model
 *  - READY ... transfers may proceed when they shouldn't.
 *
 * REVISIT should changing those flags be privileged?
 */
#define SPI_MODE_MASK           (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
                                | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
                                | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
                                | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
                                | SPI_RX_QUAD | SPI_RX_OCTAL)
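
/*
 * For reference, userspace reads and updates these flags through the
 * SPI_IOC_RD_MODE32/SPI_IOC_WR_MODE32 ioctls handled below (a minimal
 * sketch; "fd" is assumed to be an already-open spidev file descriptor):
 *
 *      uint32_t mode;
 *
 *      if (ioctl(fd, SPI_IOC_RD_MODE32, &mode) < 0)
 *              return -1;
 *      mode |= SPI_CPHA;               // e.g. request SPI mode 1
 *      if (ioctl(fd, SPI_IOC_WR_MODE32, &mode) < 0)
 *              return -1;
 */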

struct spidev_data {
        dev_t                   devt;
        spinlock_t              spi_lock;
        struct spi_device       *spi;
        struct list_head        device_entry;

        /* TX/RX buffers are NULL unless this device is open (users > 0) */
        struct mutex            buf_lock;
        unsigned                users;
        u8                      *tx_buffer;
        u8                      *rx_buffer;
        u32                     speed_hz;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
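
/*
 * Since bufsiz is read-only once the module is loaded (S_IRUGO), a larger
 * bounce buffer has to be requested at load time, e.g. with something like
 * "modprobe spidev bufsiz=65536", or "spidev.bufsiz=65536" on the kernel
 * command line when the driver is built in (values are illustrative).
 */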

/*-------------------------------------------------------------------------*/

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
        int status;
        struct spi_device *spi;

        spin_lock_irq(&spidev->spi_lock);
        spi = spidev->spi;
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                status = -ESHUTDOWN;
        else
                status = spi_sync(spi, message);

        if (status == 0)
                status = message->actual_length;

        return status;
}

static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .tx_buf         = spidev->tx_buffer,
                        .len            = len,
                        .speed_hz       = spidev->speed_hz,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .rx_buf         = spidev->rx_buffer,
                        .len            = len,
                        .speed_hz       = spidev->speed_hz,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        status = spidev_sync_read(spidev, count);
        if (status > 0) {
                unsigned long   missing;

                missing = copy_to_user(buf, spidev->rx_buffer, status);
                if (missing == status)
                        status = -EFAULT;
                else
                        status = status - missing;
        }
        mutex_unlock(&spidev->buf_lock);

        return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status;
        unsigned long           missing;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        missing = copy_from_user(spidev->tx_buffer, buf, count);
        if (missing == 0)
                status = spidev_sync_write(spidev, count);
        else
                status = -EFAULT;
        mutex_unlock(&spidev->buf_lock);

        return status;
}
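
/*
 * For reference, a minimal userspace sketch of the half-duplex read()/
 * write() paths above (illustrative only; the command bytes and buffer
 * sizes are assumptions, and each call must stay within the bufsiz limit):
 *
 *      uint8_t cmd[2] = { 0x01, 0x80 };
 *      uint8_t resp[4];
 *
 *      if (write(fd, cmd, sizeof(cmd)) != sizeof(cmd))
 *              return -1;
 *      if (read(fd, resp, sizeof(resp)) != sizeof(resp))
 *              return -1;
 *
 * Note that these are two separate SPI messages, so chipselect toggles
 * between them; combined or full-duplex operations need the
 * SPI_IOC_MESSAGE ioctl instead.
 */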

static int spidev_message(struct spidev_data *spidev,
                struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
        struct spi_message      msg;
        struct spi_transfer     *k_xfers;
        struct spi_transfer     *k_tmp;
        struct spi_ioc_transfer *u_tmp;
        unsigned                n, total, tx_total, rx_total;
        u8                      *tx_buf, *rx_buf;
        int                     status = -EFAULT;

        spi_message_init(&msg);
        k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
        if (k_xfers == NULL)
                return -ENOMEM;

        /* Construct spi_message, copying any tx data to bounce buffer.
         * We walk the array of user-provided transfers, using each one
         * to initialize a kernel version of the same transfer.
         */
        tx_buf = spidev->tx_buffer;
        rx_buf = spidev->rx_buffer;
        total = 0;
        tx_total = 0;
        rx_total = 0;
        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
                        n;
                        n--, k_tmp++, u_tmp++) {
                /* Ensure that subsequent allocations from rx_buf/tx_buf also
                 * meet DMA alignment requirements.
                 */
                unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);

                k_tmp->len = u_tmp->len;

                total += k_tmp->len;
                /* Since the function returns the total length of transfers
                 * on success, restrict the total to positive int values to
                 * avoid the return value looking like an error.  Also check
                 * each transfer length to avoid arithmetic overflow.
                 */
                if (total > INT_MAX || k_tmp->len > INT_MAX) {
                        status = -EMSGSIZE;
                        goto done;
                }

                if (u_tmp->rx_buf) {
                        /* this transfer needs space in RX bounce buffer */
                        rx_total += len_aligned;
                        if (rx_total > bufsiz) {
                                status = -EMSGSIZE;
                                goto done;
                        }
                        k_tmp->rx_buf = rx_buf;
                        rx_buf += len_aligned;
                }
                if (u_tmp->tx_buf) {
                        /* this transfer needs space in TX bounce buffer */
                        tx_total += len_aligned;
                        if (tx_total > bufsiz) {
                                status = -EMSGSIZE;
                                goto done;
                        }
                        k_tmp->tx_buf = tx_buf;
                        if (copy_from_user(tx_buf, (const u8 __user *)
                                                (uintptr_t) u_tmp->tx_buf,
                                        u_tmp->len))
                                goto done;
                        tx_buf += len_aligned;
                }

                k_tmp->cs_change = !!u_tmp->cs_change;
                k_tmp->tx_nbits = u_tmp->tx_nbits;
                k_tmp->rx_nbits = u_tmp->rx_nbits;
                k_tmp->bits_per_word = u_tmp->bits_per_word;
                k_tmp->delay.value = u_tmp->delay_usecs;
                k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
                k_tmp->speed_hz = u_tmp->speed_hz;
                k_tmp->word_delay.value = u_tmp->word_delay_usecs;
                k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
                if (!k_tmp->speed_hz)
                        k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
                dev_dbg(&spidev->spi->dev,
                        "  xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
                        k_tmp->len,
                        k_tmp->rx_buf ? "rx " : "",
                        k_tmp->tx_buf ? "tx " : "",
                        k_tmp->cs_change ? "cs " : "",
                        k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
                        k_tmp->delay.value,
                        k_tmp->word_delay.value,
                        k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
                spi_message_add_tail(k_tmp, &msg);
        }

        status = spidev_sync(spidev, &msg);
        if (status < 0)
                goto done;

        /* copy any rx data out of bounce buffer */
        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
                        n;
                        n--, k_tmp++, u_tmp++) {
                if (u_tmp->rx_buf) {
                        if (copy_to_user((u8 __user *)
                                        (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
                                        u_tmp->len)) {
                                status = -EFAULT;
                                goto done;
                        }
                }
        }
        status = total;

done:
        kfree(k_xfers);
        return status;
}

static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
                unsigned *n_ioc)
{
        u32     tmp;

        /* Check type, command number and direction */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
                        || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
                        || _IOC_DIR(cmd) != _IOC_WRITE)
                return ERR_PTR(-ENOTTY);

        tmp = _IOC_SIZE(cmd);
        if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
                return ERR_PTR(-EINVAL);
        *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
        if (*n_ioc == 0)
                return NULL;

        /* copy into scratch area */
        return memdup_user(u_ioc, tmp);
}
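
/*
 * The transfer count is carried in the ioctl size field decoded above, so
 * userspace encodes it with the SPI_IOC_MESSAGE(N) macro (a minimal
 * sketch; "fd" plus the "cmd" and "resp" local arrays are assumptions
 * for the example):
 *
 *      struct spi_ioc_transfer xfers[2];
 *
 *      memset(xfers, 0, sizeof(xfers));
 *      xfers[0].tx_buf = (uintptr_t)cmd;
 *      xfers[0].len = sizeof(cmd);
 *      xfers[0].cs_change = 0;                 // keep chipselect asserted
 *      xfers[1].rx_buf = (uintptr_t)resp;
 *      xfers[1].len = sizeof(resp);
 *      // the ioctl size field here is 2 * sizeof(struct spi_ioc_transfer)
 *      ioctl(fd, SPI_IOC_MESSAGE(2), xfers);
 */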

static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int                     retval = 0;
        struct spidev_data      *spidev;
        struct spi_device       *spi;
        u32                     tmp;
        unsigned                n_ioc;
        struct spi_ioc_transfer *ioc;

        /* Check type and command number */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
                return -ENOTTY;

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        /* use the buffer lock here for triple duty:
         *  - prevent I/O (from us) so calling spi_setup() is safe;
         *  - prevent concurrent SPI_IOC_WR_* from morphing
         *    data fields while SPI_IOC_RD_* reads them;
         *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
         */
        mutex_lock(&spidev->buf_lock);

        switch (cmd) {
        /* read requests */
        case SPI_IOC_RD_MODE:
                retval = put_user(spi->mode & SPI_MODE_MASK,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_MODE32:
                retval = put_user(spi->mode & SPI_MODE_MASK,
                                        (__u32 __user *)arg);
                break;
        case SPI_IOC_RD_LSB_FIRST:
                retval = put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_BITS_PER_WORD:
                retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_MAX_SPEED_HZ:
                retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
                break;

        /* write requests */
        case SPI_IOC_WR_MODE:
        case SPI_IOC_WR_MODE32:
                if (cmd == SPI_IOC_WR_MODE)
                        retval = get_user(tmp, (u8 __user *)arg);
                else
                        retval = get_user(tmp, (u32 __user *)arg);
                if (retval == 0) {
                        struct spi_controller *ctlr = spi->controller;
                        u32     save = spi->mode;

                        if (tmp & ~SPI_MODE_MASK) {
                                retval = -EINVAL;
                                break;
                        }

                        if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
                            ctlr->cs_gpiods[spi->chip_select])
                                tmp |= SPI_CS_HIGH;

                        tmp |= spi->mode & ~SPI_MODE_MASK;
                        spi->mode = (u16)tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "spi mode %x\n", tmp);
                }
                break;
        case SPI_IOC_WR_LSB_FIRST:
                retval = get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->mode;

                        if (tmp)
                                spi->mode |= SPI_LSB_FIRST;
                        else
                                spi->mode &= ~SPI_LSB_FIRST;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "%csb first\n",
                                                tmp ? 'l' : 'm');
                }
                break;
        case SPI_IOC_WR_BITS_PER_WORD:
                retval = get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->bits_per_word;

                        spi->bits_per_word = tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->bits_per_word = save;
                        else
                                dev_dbg(&spi->dev, "%d bits per word\n", tmp);
                }
                break;
        case SPI_IOC_WR_MAX_SPEED_HZ:
                retval = get_user(tmp, (__u32 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->max_speed_hz;

                        spi->max_speed_hz = tmp;
                        retval = spi_setup(spi);
                        if (retval == 0) {
                                spidev->speed_hz = tmp;
                                dev_dbg(&spi->dev, "%d Hz (max)\n",
                                        spidev->speed_hz);
                        }
                        spi->max_speed_hz = save;
                }
                break;

        default:
                /* segmented and/or full-duplex I/O request */
                /* Check message and copy into scratch area */
                ioc = spidev_get_ioc_message(cmd,
                                (struct spi_ioc_transfer __user *)arg, &n_ioc);
                if (IS_ERR(ioc)) {
                        retval = PTR_ERR(ioc);
                        break;
                }
                if (!ioc)
                        break;  /* n_ioc is also 0 */

                /* translate to spi_message, execute */
                retval = spidev_message(spidev, ioc, n_ioc);
                kfree(ioc);
                break;
        }

        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
        return retval;
}
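
/*
 * For reference, the SPI_IOC_WR_* cases above are what back a typical
 * userspace configuration sequence (a minimal sketch; "fd" and the values
 * are assumptions for the example):
 *
 *      uint8_t bits = 8;
 *      uint32_t speed = 1000000;       // 1 MHz
 *
 *      if (ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits) < 0)
 *              return -1;
 *      if (ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed) < 0)
 *              return -1;
 */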

#ifdef CONFIG_COMPAT
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        struct spi_ioc_transfer __user  *u_ioc;
        int                             retval = 0;
        struct spidev_data              *spidev;
        struct spi_device               *spi;
        unsigned                        n_ioc, n;
        struct spi_ioc_transfer         *ioc;

        u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
        mutex_lock(&spidev->buf_lock);

        /* Check message and copy into scratch area */
        ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
        if (IS_ERR(ioc)) {
                retval = PTR_ERR(ioc);
                goto done;
        }
        if (!ioc)
                goto done;      /* n_ioc is also 0 */

        /* Convert buffer pointers */
        for (n = 0; n < n_ioc; n++) {
                ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
                ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
        }

        /* translate to spi_message, execute */
        retval = spidev_message(spidev, ioc, n_ioc);
        kfree(ioc);

done:
        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
        return retval;
}

static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
                        && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
                        && _IOC_DIR(cmd) == _IOC_WRITE)
                return spidev_compat_ioc_message(filp, cmd, arg);

        return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */

static int spidev_open(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     status = -ENXIO;

        mutex_lock(&device_list_lock);

        list_for_each_entry(spidev, &device_list, device_entry) {
                if (spidev->devt == inode->i_rdev) {
                        status = 0;
                        break;
                }
        }

        if (status) {
                pr_debug("spidev: nothing for minor %d\n", iminor(inode));
                goto err_find_dev;
        }

        if (!spidev->tx_buffer) {
                spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->tx_buffer) {
                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_find_dev;
                }
        }

        if (!spidev->rx_buffer) {
                spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->rx_buffer) {
                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_alloc_rx_buf;
                }
        }

        spidev->users++;
        filp->private_data = spidev;
        stream_open(inode, filp);

        mutex_unlock(&device_list_lock);
        return 0;

err_alloc_rx_buf:
        kfree(spidev->tx_buffer);
        spidev->tx_buffer = NULL;
err_find_dev:
        mutex_unlock(&device_list_lock);
        return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     dofree;

        mutex_lock(&device_list_lock);
        spidev = filp->private_data;
        filp->private_data = NULL;

        spin_lock_irq(&spidev->spi_lock);
        /* ... after we unbound from the underlying device? */
        dofree = (spidev->spi == NULL);
        spin_unlock_irq(&spidev->spi_lock);

        /* last close? */
        spidev->users--;
        if (!spidev->users) {

                kfree(spidev->tx_buffer);
                spidev->tx_buffer = NULL;

                kfree(spidev->rx_buffer);
                spidev->rx_buffer = NULL;

                if (dofree)
                        kfree(spidev);
                else
                        spidev->speed_hz = spidev->spi->max_speed_hz;
        }
#ifdef CONFIG_SPI_SLAVE
        if (!dofree)
                spi_slave_abort(spidev->spi);
#endif
        mutex_unlock(&device_list_lock);

        return 0;
}

static const struct file_operations spidev_fops = {
        .owner =        THIS_MODULE,
        /* REVISIT switch to aio primitives, so that userspace
         * gets more complete API coverage.  It'll simplify things
         * too, except for the locking.
         */
        .write =        spidev_write,
        .read =         spidev_read,
        .unlocked_ioctl = spidev_ioctl,
        .compat_ioctl = spidev_compat_ioctl,
        .open =         spidev_open,
        .release =      spidev_release,
        .llseek =       no_llseek,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static struct class *spidev_class;

#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "rohm,dh2228fv" },
        { .compatible = "lineartechnology,ltc2488" },
        { .compatible = "ge,achc" },
        { .compatible = "semtech,sx1301" },
        { .compatible = "lwn,bk4" },
        { .compatible = "dh,dhcom-board" },
        { .compatible = "menlo,m53cpld" },
        { .compatible = "cisco,spi-petra" },
        {},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
#endif

#ifdef CONFIG_ACPI

/* Dummy SPI devices not to be used in production systems */
#define SPIDEV_ACPI_DUMMY       1

static const struct acpi_device_id spidev_acpi_ids[] = {
        /*
         * The ACPI SPT000* devices are only meant for development and
         * testing. Systems used in production should have a proper ACPI
         * description of the connected peripheral and should also use
         * a proper driver instead of poking at the SPI bus directly.
         */
        { "SPT0001", SPIDEV_ACPI_DUMMY },
        { "SPT0002", SPIDEV_ACPI_DUMMY },
        { "SPT0003", SPIDEV_ACPI_DUMMY },
        {},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);

static void spidev_probe_acpi(struct spi_device *spi)
{
        const struct acpi_device_id *id;

        if (!has_acpi_companion(&spi->dev))
                return;

        id = acpi_match_device(spidev_acpi_ids, &spi->dev);
        if (WARN_ON(!id))
                return;

        if (id->driver_data == SPIDEV_ACPI_DUMMY)
                dev_warn(&spi->dev, "do not use this driver in production systems!\n");
}
#else
static inline void spidev_probe_acpi(struct spi_device *spi) {}
#endif

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
        struct spidev_data      *spidev;
        int                     status;
        unsigned long           minor;

        /*
         * spidev should never be referenced in DT without a specific
         * compatible string; it is a Linux implementation detail
         * rather than a description of the hardware.
         */
        WARN(spi->dev.of_node &&
             of_device_is_compatible(spi->dev.of_node, "spidev"),
             "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);

        spidev_probe_acpi(spi);

        /* Allocate driver data */
        spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
        if (!spidev)
                return -ENOMEM;

        /* Initialize the driver data */
        spidev->spi = spi;
        spin_lock_init(&spidev->spi_lock);
        mutex_init(&spidev->buf_lock);

        INIT_LIST_HEAD(&spidev->device_entry);

        /* If we can allocate a minor number, hook up this device.
         * Reusing minors is fine so long as udev or mdev is working.
         */
        mutex_lock(&device_list_lock);
        minor = find_first_zero_bit(minors, N_SPI_MINORS);
        if (minor < N_SPI_MINORS) {
                struct device *dev;

                spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
                dev = device_create(spidev_class, &spi->dev, spidev->devt,
                                    spidev, "spidev%d.%d",
                                    spi->master->bus_num, spi->chip_select);
                status = PTR_ERR_OR_ZERO(dev);
        } else {
                dev_dbg(&spi->dev, "no minor number available!\n");
                status = -ENODEV;
        }
        if (status == 0) {
                set_bit(minor, minors);
                list_add(&spidev->device_entry, &device_list);
        }
        mutex_unlock(&device_list_lock);

        spidev->speed_hz = spi->max_speed_hz;

        if (status == 0)
                spi_set_drvdata(spi, spidev);
        else
                kfree(spidev);

        return status;
}

static int spidev_remove(struct spi_device *spi)
{
        struct spidev_data      *spidev = spi_get_drvdata(spi);

        /* prevent new opens */
        mutex_lock(&device_list_lock);
        /* make sure ops on existing fds can abort cleanly */
        spin_lock_irq(&spidev->spi_lock);
        spidev->spi = NULL;
        spin_unlock_irq(&spidev->spi_lock);

        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
        clear_bit(MINOR(spidev->devt), minors);
        if (spidev->users == 0)
                kfree(spidev);
        mutex_unlock(&device_list_lock);

        return 0;
}

static struct spi_driver spidev_spi_driver = {
        .driver = {
                .name =         "spidev",
                .of_match_table = of_match_ptr(spidev_dt_ids),
                .acpi_match_table = ACPI_PTR(spidev_acpi_ids),
        },
        .probe =        spidev_probe,
        .remove =       spidev_remove,

        /* NOTE:  suspend/resume methods are not necessary here.
         * We don't do anything except pass the requests to/from
         * the underlying controller.  The refrigerator handles
         * most issues; the controller driver handles the rest.
         */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
        int status;

        /* Claim our 256 reserved device numbers.  Then register a class
         * that will key udev/mdev to add/remove /dev nodes.  Last, register
         * the driver which manages those device numbers.
         */
        BUILD_BUG_ON(N_SPI_MINORS > 256);
        status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
        if (status < 0)
                return status;

        spidev_class = class_create(THIS_MODULE, "spidev");
        if (IS_ERR(spidev_class)) {
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
                return PTR_ERR(spidev_class);
        }

        status = spi_register_driver(&spidev_spi_driver);
        if (status < 0) {
                class_destroy(spidev_class);
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
        }
        return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
        spi_unregister_driver(&spidev_spi_driver);
        class_destroy(spidev_class);
        unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");