// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *      Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/acpi.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <linux/uaccess.h>


/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
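
/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * the half-duplex read()/write() path provided below.  The node name
 * /dev/spidev0.0, the clock rate, the payload bytes and the function name
 * are assumptions for the example; requests larger than the "bufsiz" module
 * parameter fail with -EMSGSIZE.
 *
 *      #include <stdint.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/spi/spidev.h>
 *
 *      int spidev_half_duplex_example(void)
 *      {
 *              uint8_t tx[2] = { 0x01, 0x80 };         // hypothetical command bytes
 *              uint8_t rx[4];
 *              uint32_t hz = 1000000;                  // 1 MHz for this file descriptor
 *              int fd = open("/dev/spidev0.0", O_RDWR);
 *
 *              if (fd < 0)
 *                      return -1;
 *              ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &hz);
 *              write(fd, tx, sizeof(tx));              // TX-only SPI message
 *              read(fd, rx, sizeof(rx));               // RX-only SPI message
 *              close(fd);
 *              return 0;
 *      }
 */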
#define SPIDEV_MAJOR                    153     /* assigned */
#define N_SPI_MINORS                    32      /* ... up to 256 */

static DECLARE_BITMAP(minors, N_SPI_MINORS);


/* Bit masks for spi_device.mode management.  Note that incorrect
 * settings for some of these flags can cause *lots* of trouble for other
 * devices on a shared bus:
 *
 *  - CS_HIGH ... this device will be active when it shouldn't be
 *  - 3WIRE ... when active, it won't behave as it should
 *  - NO_CS ... there will be no explicit message boundaries; this
 *      is completely incompatible with the shared bus model
 *  - READY ... transfers may proceed when they shouldn't.
 *
 * REVISIT should changing those flags be privileged?
 */
#define SPI_MODE_MASK           (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
                                | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
                                | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
                                | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)
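
/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * updating mode flags through the 32-bit mode ioctls handled in
 * spidev_ioctl() below.  Bits outside SPI_MODE_MASK are rejected there with
 * -EINVAL.  The descriptor "fd" is assumed to be an open spidev node.
 *
 *      uint32_t mode;
 *
 *      if (ioctl(fd, SPI_IOC_RD_MODE32, &mode) < 0)
 *              return -1;
 *      mode |= SPI_CPOL | SPI_CPHA;            // switch to SPI mode 3
 *      if (ioctl(fd, SPI_IOC_WR_MODE32, &mode) < 0)
 *              return -1;
 */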

struct spidev_data {
        dev_t                   devt;
        spinlock_t              spi_lock;
        struct spi_device       *spi;
        struct list_head        device_entry;

        /* TX/RX buffers are NULL unless this device is open (users > 0) */
        struct mutex            buf_lock;
        unsigned                users;
        u8                      *tx_buffer;
        u8                      *rx_buffer;
        u32                     speed_hz;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");

/*-------------------------------------------------------------------------*/

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
        int status;
        struct spi_device *spi;

        spin_lock_irq(&spidev->spi_lock);
        spi = spidev->spi;
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                status = -ESHUTDOWN;
        else
                status = spi_sync(spi, message);

        if (status == 0)
                status = message->actual_length;

        return status;
}

static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .tx_buf         = spidev->tx_buffer,
                        .len            = len,
                        .speed_hz       = spidev->speed_hz,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .rx_buf         = spidev->rx_buffer,
                        .len            = len,
                        .speed_hz       = spidev->speed_hz,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status = 0;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        status = spidev_sync_read(spidev, count);
        if (status > 0) {
                unsigned long   missing;

                missing = copy_to_user(buf, spidev->rx_buffer, status);
                if (missing == status)
                        status = -EFAULT;
                else
                        status = status - missing;
        }
        mutex_unlock(&spidev->buf_lock);

        return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status = 0;
        unsigned long           missing;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        missing = copy_from_user(spidev->tx_buffer, buf, count);
        if (missing == 0)
                status = spidev_sync_write(spidev, count);
        else
                status = -EFAULT;
        mutex_unlock(&spidev->buf_lock);

        return status;
}

static int spidev_message(struct spidev_data *spidev,
                struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
        struct spi_message      msg;
        struct spi_transfer     *k_xfers;
        struct spi_transfer     *k_tmp;
        struct spi_ioc_transfer *u_tmp;
        unsigned                n, total, tx_total, rx_total;
        u8                      *tx_buf, *rx_buf;
        int                     status = -EFAULT;

        spi_message_init(&msg);
        k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
        if (k_xfers == NULL)
                return -ENOMEM;

        /* Construct spi_message, copying any tx data to bounce buffer.
         * We walk the array of user-provided transfers, using each one
         * to initialize a kernel version of the same transfer.
         */
        tx_buf = spidev->tx_buffer;
        rx_buf = spidev->rx_buffer;
        total = 0;
        tx_total = 0;
        rx_total = 0;
        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
                        n;
                        n--, k_tmp++, u_tmp++) {
                k_tmp->len = u_tmp->len;

                total += k_tmp->len;
                /* Since the function returns the total length of transfers
                 * on success, restrict the total to positive int values to
                 * avoid the return value looking like an error.  Also check
                 * each transfer length to avoid arithmetic overflow.
                 */
                if (total > INT_MAX || k_tmp->len > INT_MAX) {
                        status = -EMSGSIZE;
                        goto done;
                }

                if (u_tmp->rx_buf) {
                        /* this transfer needs space in RX bounce buffer */
                        rx_total += k_tmp->len;
                        if (rx_total > bufsiz) {
                                status = -EMSGSIZE;
                                goto done;
                        }
                        k_tmp->rx_buf = rx_buf;
                        rx_buf += k_tmp->len;
                }
                if (u_tmp->tx_buf) {
                        /* this transfer needs space in TX bounce buffer */
                        tx_total += k_tmp->len;
                        if (tx_total > bufsiz) {
                                status = -EMSGSIZE;
                                goto done;
                        }
                        k_tmp->tx_buf = tx_buf;
                        if (copy_from_user(tx_buf, (const u8 __user *)
                                                (uintptr_t) u_tmp->tx_buf,
                                        u_tmp->len))
                                goto done;
                        tx_buf += k_tmp->len;
                }

                k_tmp->cs_change = !!u_tmp->cs_change;
                k_tmp->tx_nbits = u_tmp->tx_nbits;
                k_tmp->rx_nbits = u_tmp->rx_nbits;
                k_tmp->bits_per_word = u_tmp->bits_per_word;
                k_tmp->delay.value = u_tmp->delay_usecs;
                k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
                k_tmp->speed_hz = u_tmp->speed_hz;
                k_tmp->word_delay.value = u_tmp->word_delay_usecs;
                k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
                if (!k_tmp->speed_hz)
                        k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
                dev_dbg(&spidev->spi->dev,
                        "  xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
                        u_tmp->len,
                        u_tmp->rx_buf ? "rx " : "",
                        u_tmp->tx_buf ? "tx " : "",
                        u_tmp->cs_change ? "cs " : "",
                        u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
                        u_tmp->delay_usecs,
                        u_tmp->word_delay_usecs,
                        u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
                spi_message_add_tail(k_tmp, &msg);
        }

        status = spidev_sync(spidev, &msg);
        if (status < 0)
                goto done;

        /* copy any rx data out of bounce buffer */
        rx_buf = spidev->rx_buffer;
        for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
                if (u_tmp->rx_buf) {
                        if (copy_to_user((u8 __user *)
                                        (uintptr_t) u_tmp->rx_buf, rx_buf,
                                        u_tmp->len)) {
                                status = -EFAULT;
                                goto done;
                        }
                        rx_buf += u_tmp->len;
                }
        }
        status = total;

done:
        kfree(k_xfers);
        return status;
}

static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
                unsigned *n_ioc)
{
        u32     tmp;

        /* Check type, command number and direction */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
                        || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
                        || _IOC_DIR(cmd) != _IOC_WRITE)
                return ERR_PTR(-ENOTTY);

        tmp = _IOC_SIZE(cmd);
        if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
                return ERR_PTR(-EINVAL);
        *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
        if (*n_ioc == 0)
                return NULL;

        /* copy into scratch area */
        return memdup_user(u_ioc, tmp);
}
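
/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * the SPI_IOC_MESSAGE(N) request decoded above.  SPI_IOC_MESSAGE(2) encodes
 * 2 * sizeof(struct spi_ioc_transfer) in _IOC_SIZE(cmd), which
 * spidev_get_ioc_message() turns back into n_ioc == 2.  The command byte,
 * buffer sizes and "fd" (an open spidev node) are assumptions for the
 * example.
 *
 *      struct spi_ioc_transfer xfer[2];
 *      uint8_t cmd = 0x9f, rsp[3];
 *
 *      memset(xfer, 0, sizeof(xfer));
 *      xfer[0].tx_buf = (uintptr_t)&cmd;       // first transfer: TX only
 *      xfer[0].len = 1;
 *      xfer[1].rx_buf = (uintptr_t)rsp;        // second transfer: RX only
 *      xfer[1].len = sizeof(rsp);
 *      if (ioctl(fd, SPI_IOC_MESSAGE(2), xfer) < 0)
 *              return -1;
 */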

static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int                     retval = 0;
        struct spidev_data      *spidev;
        struct spi_device       *spi;
        u32                     tmp;
        unsigned                n_ioc;
        struct spi_ioc_transfer *ioc;

        /* Check type and command number */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
                return -ENOTTY;

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        /* use the buffer lock here for triple duty:
         *  - prevent I/O (from us) so calling spi_setup() is safe;
         *  - prevent concurrent SPI_IOC_WR_* from morphing
         *    data fields while SPI_IOC_RD_* reads them;
         *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
         */
        mutex_lock(&spidev->buf_lock);

        switch (cmd) {
        /* read requests */
        case SPI_IOC_RD_MODE:
                retval = put_user(spi->mode & SPI_MODE_MASK,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_MODE32:
                retval = put_user(spi->mode & SPI_MODE_MASK,
                                        (__u32 __user *)arg);
                break;
        case SPI_IOC_RD_LSB_FIRST:
                retval = put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_BITS_PER_WORD:
                retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_MAX_SPEED_HZ:
                retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
                break;

        /* write requests */
        case SPI_IOC_WR_MODE:
        case SPI_IOC_WR_MODE32:
                if (cmd == SPI_IOC_WR_MODE)
                        retval = get_user(tmp, (u8 __user *)arg);
                else
                        retval = get_user(tmp, (u32 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->mode;

                        if (tmp & ~SPI_MODE_MASK) {
                                retval = -EINVAL;
                                break;
                        }

                        tmp |= spi->mode & ~SPI_MODE_MASK;
                        spi->mode = (u16)tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "spi mode %x\n", tmp);
                }
                break;
        case SPI_IOC_WR_LSB_FIRST:
                retval = get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->mode;

                        if (tmp)
                                spi->mode |= SPI_LSB_FIRST;
                        else
                                spi->mode &= ~SPI_LSB_FIRST;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "%csb first\n",
                                                tmp ? 'l' : 'm');
                }
                break;
        case SPI_IOC_WR_BITS_PER_WORD:
                retval = get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->bits_per_word;

                        spi->bits_per_word = tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->bits_per_word = save;
                        else
                                dev_dbg(&spi->dev, "%d bits per word\n", tmp);
                }
                break;
        case SPI_IOC_WR_MAX_SPEED_HZ:
                retval = get_user(tmp, (__u32 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->max_speed_hz;

                        spi->max_speed_hz = tmp;
                        retval = spi_setup(spi);
                        if (retval >= 0)
                                spidev->speed_hz = tmp;
                        else
                                dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
                        spi->max_speed_hz = save;
                }
                break;

        default:
                /* segmented and/or full-duplex I/O request */
                /* Check message and copy into scratch area */
                ioc = spidev_get_ioc_message(cmd,
                                (struct spi_ioc_transfer __user *)arg, &n_ioc);
                if (IS_ERR(ioc)) {
                        retval = PTR_ERR(ioc);
                        break;
                }
                if (!ioc)
                        break;  /* n_ioc is also 0 */

                /* translate to spi_message, execute */
                retval = spidev_message(spidev, ioc, n_ioc);
                kfree(ioc);
                break;
        }

        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
        return retval;
}

#ifdef CONFIG_COMPAT
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        struct spi_ioc_transfer __user  *u_ioc;
        int                             retval = 0;
        struct spidev_data              *spidev;
        struct spi_device               *spi;
        unsigned                        n_ioc, n;
        struct spi_ioc_transfer         *ioc;

        u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
        mutex_lock(&spidev->buf_lock);

        /* Check message and copy into scratch area */
        ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
        if (IS_ERR(ioc)) {
                retval = PTR_ERR(ioc);
                goto done;
        }
        if (!ioc)
                goto done;      /* n_ioc is also 0 */

        /* Convert buffer pointers */
        for (n = 0; n < n_ioc; n++) {
                ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
                ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
        }

        /* translate to spi_message, execute */
        retval = spidev_message(spidev, ioc, n_ioc);
        kfree(ioc);

done:
        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
        return retval;
}

static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
                        && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
                        && _IOC_DIR(cmd) == _IOC_WRITE)
                return spidev_compat_ioc_message(filp, cmd, arg);

        return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */

static int spidev_open(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     status = -ENXIO;

        mutex_lock(&device_list_lock);

        list_for_each_entry(spidev, &device_list, device_entry) {
                if (spidev->devt == inode->i_rdev) {
                        status = 0;
                        break;
                }
        }

        if (status) {
                pr_debug("spidev: nothing for minor %d\n", iminor(inode));
                goto err_find_dev;
        }

        if (!spidev->tx_buffer) {
                spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->tx_buffer) {
                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_find_dev;
                }
        }

        if (!spidev->rx_buffer) {
                spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->rx_buffer) {
                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_alloc_rx_buf;
                }
        }

        spidev->users++;
        filp->private_data = spidev;
        stream_open(inode, filp);

        mutex_unlock(&device_list_lock);
        return 0;

err_alloc_rx_buf:
        kfree(spidev->tx_buffer);
        spidev->tx_buffer = NULL;
err_find_dev:
        mutex_unlock(&device_list_lock);
        return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     dofree;

        mutex_lock(&device_list_lock);
        spidev = filp->private_data;
        filp->private_data = NULL;

        spin_lock_irq(&spidev->spi_lock);
        /* ... after we unbound from the underlying device? */
        dofree = (spidev->spi == NULL);
        spin_unlock_irq(&spidev->spi_lock);

        /* last close? */
        spidev->users--;
        if (!spidev->users) {

                kfree(spidev->tx_buffer);
                spidev->tx_buffer = NULL;

                kfree(spidev->rx_buffer);
                spidev->rx_buffer = NULL;

                if (dofree)
                        kfree(spidev);
                else
                        spidev->speed_hz = spidev->spi->max_speed_hz;
        }
#ifdef CONFIG_SPI_SLAVE
        /* only abort while still bound; spidev may have been freed above */
        if (!dofree)
                spi_slave_abort(spidev->spi);
#endif
        mutex_unlock(&device_list_lock);

        return 0;
}

static const struct file_operations spidev_fops = {
        .owner =        THIS_MODULE,
        /* REVISIT switch to aio primitives, so that userspace
         * gets more complete API coverage.  It'll simplify things
         * too, except for the locking.
         */
        .write =        spidev_write,
        .read =         spidev_read,
        .unlocked_ioctl = spidev_ioctl,
        .compat_ioctl = spidev_compat_ioctl,
        .open =         spidev_open,
        .release =      spidev_release,
        .llseek =       no_llseek,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static struct class *spidev_class;

#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "rohm,dh2228fv" },
        { .compatible = "lineartechnology,ltc2488" },
        { .compatible = "ge,achc" },
        { .compatible = "semtech,sx1301" },
        { .compatible = "lwn,bk4" },
        { .compatible = "dh,dhcom-board" },
        { .compatible = "menlo,m53cpld" },
        {},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
#endif

#ifdef CONFIG_ACPI

/* Dummy SPI devices not to be used in production systems */
#define SPIDEV_ACPI_DUMMY       1

static const struct acpi_device_id spidev_acpi_ids[] = {
        /*
         * The ACPI SPT000* devices are only meant for development and
         * testing. Systems used in production should have a proper ACPI
         * description of the connected peripheral and they should also use
         * a proper driver instead of poking directly to the SPI bus.
         */
        { "SPT0001", SPIDEV_ACPI_DUMMY },
        { "SPT0002", SPIDEV_ACPI_DUMMY },
        { "SPT0003", SPIDEV_ACPI_DUMMY },
        {},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);

static void spidev_probe_acpi(struct spi_device *spi)
{
        const struct acpi_device_id *id;

        if (!has_acpi_companion(&spi->dev))
                return;

        id = acpi_match_device(spidev_acpi_ids, &spi->dev);
        if (WARN_ON(!id))
                return;

        if (id->driver_data == SPIDEV_ACPI_DUMMY)
                dev_warn(&spi->dev, "do not use this driver in production systems!\n");
}
#else
static inline void spidev_probe_acpi(struct spi_device *spi) {}
#endif

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
        struct spidev_data      *spidev;
        int                     status;
        unsigned long           minor;

        /*
         * spidev should never be referenced in DT without a specific
         * compatible string, it is a Linux implementation thing
         * rather than a description of the hardware.
         */
        WARN(spi->dev.of_node &&
             of_device_is_compatible(spi->dev.of_node, "spidev"),
             "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);

        spidev_probe_acpi(spi);

        /* Allocate driver data */
        spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
        if (!spidev)
                return -ENOMEM;

        /* Initialize the driver data */
        spidev->spi = spi;
        spin_lock_init(&spidev->spi_lock);
        mutex_init(&spidev->buf_lock);

        INIT_LIST_HEAD(&spidev->device_entry);

        /* If we can allocate a minor number, hook up this device.
         * Reusing minors is fine so long as udev or mdev is working.
         */
        mutex_lock(&device_list_lock);
        minor = find_first_zero_bit(minors, N_SPI_MINORS);
        if (minor < N_SPI_MINORS) {
                struct device *dev;

                spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
                dev = device_create(spidev_class, &spi->dev, spidev->devt,
                                    spidev, "spidev%d.%d",
                                    spi->master->bus_num, spi->chip_select);
                status = PTR_ERR_OR_ZERO(dev);
        } else {
                dev_dbg(&spi->dev, "no minor number available!\n");
                status = -ENODEV;
        }
        if (status == 0) {
                set_bit(minor, minors);
                list_add(&spidev->device_entry, &device_list);
        }
        mutex_unlock(&device_list_lock);

        spidev->speed_hz = spi->max_speed_hz;

        if (status == 0)
                spi_set_drvdata(spi, spidev);
        else
                kfree(spidev);

        return status;
}

static int spidev_remove(struct spi_device *spi)
{
        struct spidev_data      *spidev = spi_get_drvdata(spi);

        /* prevent new opens and serialize with spidev_release() */
        mutex_lock(&device_list_lock);

        /* make sure ops on existing fds can abort cleanly */
        spin_lock_irq(&spidev->spi_lock);
        spidev->spi = NULL;
        spin_unlock_irq(&spidev->spi_lock);

        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
        clear_bit(MINOR(spidev->devt), minors);
        if (spidev->users == 0)
                kfree(spidev);
        mutex_unlock(&device_list_lock);

        return 0;
}

static struct spi_driver spidev_spi_driver = {
        .driver = {
                .name =         "spidev",
                .of_match_table = of_match_ptr(spidev_dt_ids),
                .acpi_match_table = ACPI_PTR(spidev_acpi_ids),
        },
        .probe =        spidev_probe,
        .remove =       spidev_remove,

        /* NOTE:  suspend/resume methods are not necessary here.
         * We don't do anything except pass the requests to/from
         * the underlying controller.  The refrigerator handles
         * most issues; the controller driver handles the rest.
         */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
        int status;

        /* Claim our 256 reserved device numbers.  Then register a class
         * that will key udev/mdev to add/remove /dev nodes.  Last, register
         * the driver which manages those device numbers.
         */
        BUILD_BUG_ON(N_SPI_MINORS > 256);
        status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
        if (status < 0)
                return status;

        spidev_class = class_create(THIS_MODULE, "spidev");
        if (IS_ERR(spidev_class)) {
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
                return PTR_ERR(spidev_class);
        }

        status = spi_register_driver(&spidev_spi_driver);
        if (status < 0) {
                class_destroy(spidev_class);
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
        }
        return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
        spi_unregister_driver(&spidev_spi_driver);
        class_destroy(spidev_class);
        unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");