linux/drivers/spi/spidev.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *      Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/acpi.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <linux/uaccess.h>


/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
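
/*
 * Illustration (not part of the driver): a minimal userspace sketch of the
 * half-duplex read()/write() path implemented below.  The device node name
 * and payload bytes are assumptions for the example, and error handling is
 * omitted:
 *
 *      #include <stdint.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/dev/spidev0.0", O_RDWR);   // hypothetical node
 *      uint8_t cmd[2] = { 0x90, 0x00 };           // example payload
 *      uint8_t resp[4];
 *
 *      write(fd, cmd, sizeof(cmd));    // one TX-only spi_message
 *      read(fd, resp, sizeof(resp));   // one RX-only spi_message
 *      close(fd);
 *
 * Each read() or write() is bounded by the "bufsiz" module parameter;
 * full-duplex transfers need the SPI_IOC_MESSAGE ioctl instead.
 */
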
#define SPIDEV_MAJOR                    153     /* assigned */
#define N_SPI_MINORS                    32      /* ... up to 256 */

static DECLARE_BITMAP(minors, N_SPI_MINORS);


/* Bit masks for spi_device.mode management.  Note that incorrect
 * settings for some of these flags can cause *lots* of trouble for
 * other devices on a shared bus:
 *
 *  - CS_HIGH ... this device will be active when it shouldn't be
 *  - 3WIRE ... when active, it won't behave as it should
 *  - NO_CS ... there will be no explicit message boundaries; this
 *      is completely incompatible with the shared bus model
 *  - READY ... transfers may proceed when they shouldn't.
 *
 * REVISIT should changing those flags be privileged?
 */
#define SPI_MODE_MASK           (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
                                | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
                                | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
                                | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)
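
/*
 * Illustration (not part of the driver): userspace usually updates the mode
 * word with a read-modify-write so unrelated flags survive.  A hedged sketch
 * using the 32-bit mode ioctls (fd as in the example above, error checks
 * omitted):
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/spi/spidev.h>
 *
 *      uint32_t mode;
 *
 *      ioctl(fd, SPI_IOC_RD_MODE32, &mode);
 *      mode |= SPI_CPHA;               // e.g. move to SPI mode 1
 *      mode &= ~SPI_CPOL;
 *      ioctl(fd, SPI_IOC_WR_MODE32, &mode);
 *
 * Requests that set bits outside SPI_MODE_MASK are rejected with -EINVAL by
 * the ioctl handler below.
 */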

struct spidev_data {
        dev_t                   devt;
        spinlock_t              spi_lock;
        struct spi_device       *spi;
        struct list_head        device_entry;

        /* TX/RX buffers are NULL unless this device is open (users > 0) */
        struct mutex            buf_lock;
        unsigned                users;
        u8                      *tx_buffer;
        u8                      *rx_buffer;
        u32                     speed_hz;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");

/*-------------------------------------------------------------------------*/

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
        int status;
        struct spi_device *spi;

        spin_lock_irq(&spidev->spi_lock);
        spi = spidev->spi;
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                status = -ESHUTDOWN;
        else
                status = spi_sync(spi, message);

        if (status == 0)
                status = message->actual_length;

        return status;
}

static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .tx_buf         = spidev->tx_buffer,
                        .len            = len,
                        .speed_hz       = spidev->speed_hz,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .rx_buf         = spidev->rx_buffer,
                        .len            = len,
                        .speed_hz       = spidev->speed_hz,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}
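
/*
 * Both helpers above reuse the per-device bounce buffers, so callers must
 * hold buf_lock around them; spidev->speed_hz starts out as the device's
 * max_speed_hz and is updated by SPI_IOC_WR_MAX_SPEED_HZ.
 */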

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status = 0;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        status = spidev_sync_read(spidev, count);
        if (status > 0) {
                unsigned long   missing;

                missing = copy_to_user(buf, spidev->rx_buffer, status);
                if (missing == status)
                        status = -EFAULT;
                else
                        status = status - missing;
        }
        mutex_unlock(&spidev->buf_lock);

        return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status = 0;
        unsigned long           missing;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        missing = copy_from_user(spidev->tx_buffer, buf, count);
        if (missing == 0)
                status = spidev_sync_write(spidev, count);
        else
                status = -EFAULT;
        mutex_unlock(&spidev->buf_lock);

        return status;
}

static int spidev_message(struct spidev_data *spidev,
                struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
        struct spi_message      msg;
        struct spi_transfer     *k_xfers;
        struct spi_transfer     *k_tmp;
        struct spi_ioc_transfer *u_tmp;
        unsigned                n, total, tx_total, rx_total;
        u8                      *tx_buf, *rx_buf;
        int                     status = -EFAULT;

        spi_message_init(&msg);
        k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
        if (k_xfers == NULL)
                return -ENOMEM;

        /* Construct spi_message, copying any tx data to bounce buffer.
         * We walk the array of user-provided transfers, using each one
         * to initialize a kernel version of the same transfer.
         */
        tx_buf = spidev->tx_buffer;
        rx_buf = spidev->rx_buffer;
        total = 0;
        tx_total = 0;
        rx_total = 0;
        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
                        n;
                        n--, k_tmp++, u_tmp++) {
                k_tmp->len = u_tmp->len;

                total += k_tmp->len;
                /* Since the function returns the total length of transfers
                 * on success, restrict the total to positive int values to
                 * avoid the return value looking like an error.  Also check
                 * each transfer length to avoid arithmetic overflow.
                 */
                if (total > INT_MAX || k_tmp->len > INT_MAX) {
                        status = -EMSGSIZE;
                        goto done;
                }

                if (u_tmp->rx_buf) {
                        /* this transfer needs space in RX bounce buffer */
                        rx_total += k_tmp->len;
                        if (rx_total > bufsiz) {
                                status = -EMSGSIZE;
                                goto done;
                        }
                        k_tmp->rx_buf = rx_buf;
                        rx_buf += k_tmp->len;
                }
                if (u_tmp->tx_buf) {
                        /* this transfer needs space in TX bounce buffer */
                        tx_total += k_tmp->len;
                        if (tx_total > bufsiz) {
                                status = -EMSGSIZE;
                                goto done;
                        }
                        k_tmp->tx_buf = tx_buf;
                        if (copy_from_user(tx_buf, (const u8 __user *)
                                                (uintptr_t) u_tmp->tx_buf,
                                        u_tmp->len))
                                goto done;
                        tx_buf += k_tmp->len;
                }

                k_tmp->cs_change = !!u_tmp->cs_change;
                k_tmp->tx_nbits = u_tmp->tx_nbits;
                k_tmp->rx_nbits = u_tmp->rx_nbits;
                k_tmp->bits_per_word = u_tmp->bits_per_word;
                k_tmp->delay.value = u_tmp->delay_usecs;
                k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
                k_tmp->speed_hz = u_tmp->speed_hz;
                k_tmp->word_delay.value = u_tmp->word_delay_usecs;
                k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
                if (!k_tmp->speed_hz)
                        k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
                dev_dbg(&spidev->spi->dev,
                        "  xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
                        u_tmp->len,
                        u_tmp->rx_buf ? "rx " : "",
                        u_tmp->tx_buf ? "tx " : "",
                        u_tmp->cs_change ? "cs " : "",
                        u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
                        u_tmp->delay_usecs,
                        u_tmp->word_delay_usecs,
                        u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
                spi_message_add_tail(k_tmp, &msg);
        }

        status = spidev_sync(spidev, &msg);
        if (status < 0)
                goto done;

        /* copy any rx data out of bounce buffer */
        rx_buf = spidev->rx_buffer;
        for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
                if (u_tmp->rx_buf) {
                        if (copy_to_user((u8 __user *)
                                        (uintptr_t) u_tmp->rx_buf, rx_buf,
                                        u_tmp->len)) {
                                status = -EFAULT;
                                goto done;
                        }
                        rx_buf += u_tmp->len;
                }
        }
        status = total;

done:
        kfree(k_xfers);
        return status;
}

static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
                unsigned *n_ioc)
{
        u32     tmp;

        /* Check type, command number and direction */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
                        || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
                        || _IOC_DIR(cmd) != _IOC_WRITE)
                return ERR_PTR(-ENOTTY);

        tmp = _IOC_SIZE(cmd);
        if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
                return ERR_PTR(-EINVAL);
        *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
        if (*n_ioc == 0)
                return NULL;

        /* copy into scratch area */
        return memdup_user(u_ioc, tmp);
}
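
/*
 * Illustration (not part of the driver): SPI_IOC_MESSAGE(N) encodes the byte
 * size of N spi_ioc_transfer structures in _IOC_SIZE(cmd), which is what the
 * helper above decodes.  A minimal full-duplex userspace sketch (fd, buffers
 * and values are assumptions, error handling omitted):
 *
 *      uint8_t tx[4] = { 0x9f, 0, 0, 0 };      // example bytes
 *      uint8_t rx[4];
 *      struct spi_ioc_transfer xfer = {
 *              .tx_buf = (unsigned long)tx,
 *              .rx_buf = (unsigned long)rx,
 *              .len = sizeof(tx),
 *              .speed_hz = 1000000,            // optional per-transfer override
 *      };
 *
 *      ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
 *
 * A zero-size SPI_IOC_MESSAGE(0) is accepted and simply returns 0.
 */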

static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int                     retval = 0;
        struct spidev_data      *spidev;
        struct spi_device       *spi;
        u32                     tmp;
        unsigned                n_ioc;
        struct spi_ioc_transfer *ioc;

        /* Check type and command number */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
                return -ENOTTY;

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        /* use the buffer lock here for triple duty:
         *  - prevent I/O (from us) so calling spi_setup() is safe;
         *  - prevent concurrent SPI_IOC_WR_* from morphing
         *    data fields while SPI_IOC_RD_* reads them;
         *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
         */
        mutex_lock(&spidev->buf_lock);

        switch (cmd) {
        /* read requests */
        case SPI_IOC_RD_MODE:
                retval = put_user(spi->mode & SPI_MODE_MASK,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_MODE32:
                retval = put_user(spi->mode & SPI_MODE_MASK,
                                        (__u32 __user *)arg);
                break;
        case SPI_IOC_RD_LSB_FIRST:
                retval = put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_BITS_PER_WORD:
                retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_MAX_SPEED_HZ:
                retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
                break;

        /* write requests */
        case SPI_IOC_WR_MODE:
        case SPI_IOC_WR_MODE32:
                if (cmd == SPI_IOC_WR_MODE)
                        retval = get_user(tmp, (u8 __user *)arg);
                else
                        retval = get_user(tmp, (u32 __user *)arg);
                if (retval == 0) {
                        struct spi_controller *ctlr = spi->controller;
                        u32     save = spi->mode;

                        if (tmp & ~SPI_MODE_MASK) {
                                retval = -EINVAL;
                                break;
                        }

                        if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
                            ctlr->cs_gpiods[spi->chip_select])
                                tmp |= SPI_CS_HIGH;

                        tmp |= spi->mode & ~SPI_MODE_MASK;
                        spi->mode = (u16)tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "spi mode %x\n", tmp);
                }
                break;
        case SPI_IOC_WR_LSB_FIRST:
                retval = get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->mode;

                        if (tmp)
                                spi->mode |= SPI_LSB_FIRST;
                        else
                                spi->mode &= ~SPI_LSB_FIRST;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "%csb first\n",
                                                tmp ? 'l' : 'm');
                }
                break;
        case SPI_IOC_WR_BITS_PER_WORD:
                retval = get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->bits_per_word;

                        spi->bits_per_word = tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->bits_per_word = save;
                        else
                                dev_dbg(&spi->dev, "%d bits per word\n", tmp);
                }
                break;
        case SPI_IOC_WR_MAX_SPEED_HZ:
                retval = get_user(tmp, (__u32 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->max_speed_hz;

                        spi->max_speed_hz = tmp;
                        retval = spi_setup(spi);
                        if (retval >= 0)
                                spidev->speed_hz = tmp;
                        else
                                dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
                        spi->max_speed_hz = save;
                }
                break;

        default:
                /* segmented and/or full-duplex I/O request */
                /* Check message and copy into scratch area */
                ioc = spidev_get_ioc_message(cmd,
                                (struct spi_ioc_transfer __user *)arg, &n_ioc);
                if (IS_ERR(ioc)) {
                        retval = PTR_ERR(ioc);
                        break;
                }
                if (!ioc)
                        break;  /* n_ioc is also 0 */

                /* translate to spi_message, execute */
                retval = spidev_message(spidev, ioc, n_ioc);
                kfree(ioc);
                break;
        }

        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
        return retval;
}
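
/*
 * Illustration (not part of the driver): the remaining per-device parameters
 * are configured the same way.  A hedged sketch with example values (fd as
 * in the earlier examples, error checks omitted):
 *
 *      uint8_t bits = 8;
 *      uint32_t speed = 500000;
 *
 *      ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits);
 *      ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);
 *      ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed);     // value now in effect
 *
 * Both write requests go through spi_setup(), so the controller driver gets
 * a chance to validate them before they take effect.
 */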

#ifdef CONFIG_COMPAT
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        struct spi_ioc_transfer __user  *u_ioc;
        int                             retval = 0;
        struct spidev_data              *spidev;
        struct spi_device               *spi;
        unsigned                        n_ioc, n;
        struct spi_ioc_transfer         *ioc;

        u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
        mutex_lock(&spidev->buf_lock);

        /* Check message and copy into scratch area */
        ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
        if (IS_ERR(ioc)) {
                retval = PTR_ERR(ioc);
                goto done;
        }
        if (!ioc)
                goto done;      /* n_ioc is also 0 */

        /* Convert buffer pointers */
        for (n = 0; n < n_ioc; n++) {
                ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
                ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
        }

        /* translate to spi_message, execute */
        retval = spidev_message(spidev, ioc, n_ioc);
        kfree(ioc);

done:
        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
        return retval;
}

static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
                        && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
                        && _IOC_DIR(cmd) == _IOC_WRITE)
                return spidev_compat_ioc_message(filp, cmd, arg);

        return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */

static int spidev_open(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     status = -ENXIO;

        mutex_lock(&device_list_lock);

        list_for_each_entry(spidev, &device_list, device_entry) {
                if (spidev->devt == inode->i_rdev) {
                        status = 0;
                        break;
                }
        }

        if (status) {
                pr_debug("spidev: nothing for minor %d\n", iminor(inode));
                goto err_find_dev;
        }

        if (!spidev->tx_buffer) {
                spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->tx_buffer) {
                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_find_dev;
                }
        }

        if (!spidev->rx_buffer) {
                spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->rx_buffer) {
                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_alloc_rx_buf;
                }
        }

        spidev->users++;
        filp->private_data = spidev;
        stream_open(inode, filp);

        mutex_unlock(&device_list_lock);
        return 0;

err_alloc_rx_buf:
        kfree(spidev->tx_buffer);
        spidev->tx_buffer = NULL;
err_find_dev:
        mutex_unlock(&device_list_lock);
        return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;

        mutex_lock(&device_list_lock);
        spidev = filp->private_data;
        filp->private_data = NULL;

        /* last close? */
        spidev->users--;
        if (!spidev->users) {
                int             dofree;

                kfree(spidev->tx_buffer);
                spidev->tx_buffer = NULL;

                kfree(spidev->rx_buffer);
                spidev->rx_buffer = NULL;

                spin_lock_irq(&spidev->spi_lock);
                if (spidev->spi)
                        spidev->speed_hz = spidev->spi->max_speed_hz;

                /* ... after we unbound from the underlying device? */
                dofree = (spidev->spi == NULL);
                spin_unlock_irq(&spidev->spi_lock);

                if (dofree)
                        kfree(spidev);
        }
#ifdef CONFIG_SPI_SLAVE
        spi_slave_abort(spidev->spi);
#endif
        mutex_unlock(&device_list_lock);

        return 0;
}

static const struct file_operations spidev_fops = {
        .owner =        THIS_MODULE,
        /* REVISIT switch to aio primitives, so that userspace
         * gets more complete API coverage.  It'll simplify things
         * too, except for the locking.
         */
        .write =        spidev_write,
        .read =         spidev_read,
        .unlocked_ioctl = spidev_ioctl,
        .compat_ioctl = spidev_compat_ioctl,
        .open =         spidev_open,
        .release =      spidev_release,
        .llseek =       no_llseek,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static struct class *spidev_class;

#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "rohm,dh2228fv" },
        { .compatible = "lineartechnology,ltc2488" },
        { .compatible = "ge,achc" },
        { .compatible = "semtech,sx1301" },
        { .compatible = "lwn,bk4" },
        { .compatible = "dh,dhcom-board" },
        { .compatible = "menlo,m53cpld" },
        {},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
#endif

#ifdef CONFIG_ACPI

/* Dummy SPI devices not to be used in production systems */
#define SPIDEV_ACPI_DUMMY       1

static const struct acpi_device_id spidev_acpi_ids[] = {
        /*
         * The ACPI SPT000* devices are only meant for development and
         * testing. Systems used in production should have a proper ACPI
         * description of the connected peripheral and they should also use
         * a proper driver instead of poking directly to the SPI bus.
         */
        { "SPT0001", SPIDEV_ACPI_DUMMY },
        { "SPT0002", SPIDEV_ACPI_DUMMY },
        { "SPT0003", SPIDEV_ACPI_DUMMY },
        {},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);

static void spidev_probe_acpi(struct spi_device *spi)
{
        const struct acpi_device_id *id;

        if (!has_acpi_companion(&spi->dev))
                return;

        id = acpi_match_device(spidev_acpi_ids, &spi->dev);
        if (WARN_ON(!id))
                return;

        if (id->driver_data == SPIDEV_ACPI_DUMMY)
                dev_warn(&spi->dev, "do not use this driver in production systems!\n");
}
#else
static inline void spidev_probe_acpi(struct spi_device *spi) {}
#endif

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
        struct spidev_data      *spidev;
        int                     status;
        unsigned long           minor;

        /*
         * spidev should never be referenced in DT without a specific
         * compatible string, it is a Linux implementation thing
         * rather than a description of the hardware.
         */
        WARN(spi->dev.of_node &&
             of_device_is_compatible(spi->dev.of_node, "spidev"),
             "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);

        spidev_probe_acpi(spi);

        /* Allocate driver data */
        spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
        if (!spidev)
                return -ENOMEM;

        /* Initialize the driver data */
        spidev->spi = spi;
        spin_lock_init(&spidev->spi_lock);
        mutex_init(&spidev->buf_lock);

        INIT_LIST_HEAD(&spidev->device_entry);

        /* If we can allocate a minor number, hook up this device.
         * Reusing minors is fine so long as udev or mdev is working.
         */
        mutex_lock(&device_list_lock);
        minor = find_first_zero_bit(minors, N_SPI_MINORS);
        if (minor < N_SPI_MINORS) {
                struct device *dev;

                spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
                dev = device_create(spidev_class, &spi->dev, spidev->devt,
                                    spidev, "spidev%d.%d",
                                    spi->master->bus_num, spi->chip_select);
                status = PTR_ERR_OR_ZERO(dev);
        } else {
                dev_dbg(&spi->dev, "no minor number available!\n");
                status = -ENODEV;
        }
        if (status == 0) {
                set_bit(minor, minors);
                list_add(&spidev->device_entry, &device_list);
        }
        mutex_unlock(&device_list_lock);

        spidev->speed_hz = spi->max_speed_hz;

        if (status == 0)
                spi_set_drvdata(spi, spidev);
        else
                kfree(spidev);

        return status;
}

static int spidev_remove(struct spi_device *spi)
{
        struct spidev_data      *spidev = spi_get_drvdata(spi);

        /* make sure ops on existing fds can abort cleanly */
        spin_lock_irq(&spidev->spi_lock);
        spidev->spi = NULL;
        spin_unlock_irq(&spidev->spi_lock);

        /* prevent new opens */
        mutex_lock(&device_list_lock);
        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
        clear_bit(MINOR(spidev->devt), minors);
        if (spidev->users == 0)
                kfree(spidev);
        mutex_unlock(&device_list_lock);

        return 0;
}

static struct spi_driver spidev_spi_driver = {
        .driver = {
                .name =         "spidev",
                .of_match_table = of_match_ptr(spidev_dt_ids),
                .acpi_match_table = ACPI_PTR(spidev_acpi_ids),
        },
        .probe =        spidev_probe,
        .remove =       spidev_remove,

        /* NOTE:  suspend/resume methods are not necessary here.
         * We don't do anything except pass the requests to/from
         * the underlying controller.  The refrigerator handles
         * most issues; the controller driver handles the rest.
         */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
        int status;

        /* Claim our 256 reserved device numbers.  Then register a class
         * that will key udev/mdev to add/remove /dev nodes.  Last, register
         * the driver which manages those device numbers.
         */
        BUILD_BUG_ON(N_SPI_MINORS > 256);
        status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
        if (status < 0)
                return status;

        spidev_class = class_create(THIS_MODULE, "spidev");
        if (IS_ERR(spidev_class)) {
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
                return PTR_ERR(spidev_class);
        }

        status = spi_register_driver(&spidev_spi_driver);
        if (status < 0) {
                class_destroy(spidev_class);
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
        }
        return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
        spi_unregister_driver(&spidev_spi_driver);
        class_destroy(spidev_class);
        unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");