linux/drivers/spi/spidev.c
   1/*
   2 * Simple synchronous userspace interface to SPI devices
   3 *
   4 * Copyright (C) 2006 SWAPP
   5 *      Andrea Paterniani <a.paterniani@swapp-eng.it>
   6 * Copyright (C) 2007 David Brownell (simplification, cleanup)
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 */
  18
  19#include <linux/init.h>
  20#include <linux/module.h>
  21#include <linux/ioctl.h>
  22#include <linux/fs.h>
  23#include <linux/device.h>
  24#include <linux/err.h>
  25#include <linux/list.h>
  26#include <linux/errno.h>
  27#include <linux/mutex.h>
  28#include <linux/slab.h>
  29#include <linux/compat.h>
  30#include <linux/of.h>
  31#include <linux/of_device.h>
  32
  33#include <linux/spi/spi.h>
  34#include <linux/spi/spidev.h>
  35
  36#include <linux/uaccess.h>
  37
  38
  39/*
  40 * This supports access to SPI devices using normal userspace I/O calls.
  41 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
  42 * and often mask message boundaries, full SPI support requires full duplex
  43 * transfers.  There are several kinds of internal message boundaries to
  44 * handle chipselect management and other protocol options.
  45 *
  46 * SPI has a character major number assigned.  We allocate minor numbers
   47 * dynamically using a bitmask.  You must use hotplug tools, such as udev
   48 * (or mdev with busybox), to create and destroy the /dev/spidevB.C device
  49 * nodes, since there is no fixed association of minor numbers with any
  50 * particular SPI bus or device.
  51 */
  52#define SPIDEV_MAJOR                    153     /* assigned */
  53#define N_SPI_MINORS                    32      /* ... up to 256 */
  54
  55static DECLARE_BITMAP(minors, N_SPI_MINORS);
  56
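/*
 * Editorial aside: a minimal userspace sketch of this interface, assuming a
 * node created by udev/mdev at /dev/spidev0.0; the path and the payload
 * bytes below are placeholders, not part of this driver.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/spi/spidev.h>
 *
 *	int example(void)
 *	{
 *		int fd = open("/dev/spidev0.0", O_RDWR);
 *		uint8_t tx[3] = { 0x01, 0x02, 0x03 };	// placeholder bytes
 *		uint8_t rx[3] = { 0 };
 *		struct spi_ioc_transfer xfer;
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		// Half duplex, using the device's current mode/speed/bits:
 *		write(fd, tx, sizeof(tx));	// shifts tx out, discards input
 *		read(fd, rx, sizeof(rx));	// clocks the bus, captures input
 *
 *		// Full duplex needs SPI_IOC_MESSAGE(); tx and rx move together:
 *		memset(&xfer, 0, sizeof(xfer));
 *		xfer.tx_buf = (unsigned long)tx;
 *		xfer.rx_buf = (unsigned long)rx;
 *		xfer.len = sizeof(tx);
 *		return ioctl(fd, SPI_IOC_MESSAGE(1), &xfer); // bytes moved, or -1
 *	}
 */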
  57
   58/* Bit masks for spi_device.mode management.  Note that incorrect
   59 * values for some of these flags can cause *lots* of trouble for other
  60 * devices on a shared bus:
  61 *
  62 *  - CS_HIGH ... this device will be active when it shouldn't be
  63 *  - 3WIRE ... when active, it won't behave as it should
  64 *  - NO_CS ... there will be no explicit message boundaries; this
  65 *      is completely incompatible with the shared bus model
  66 *  - READY ... transfers may proceed when they shouldn't.
  67 *
  68 * REVISIT should changing those flags be privileged?
  69 */
  70#define SPI_MODE_MASK           (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
  71                                | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
  72                                | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
  73                                | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)
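/*
 * Editorial aside: userspace changes these flags through the mode ioctls; a
 * short hedged sketch (the CS_HIGH choice is only an illustration of a flag
 * that can disturb a shared bus, as warned above):
 *
 *	uint8_t mode = SPI_MODE_0 | SPI_CS_HIGH;
 *
 *	if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0)	// 8-bit flag set
 *		perror("SPI_IOC_WR_MODE");
 *	ioctl(fd, SPI_IOC_RD_MODE, &mode);		// read the result back
 *
 * SPI_IOC_WR_MODE32/SPI_IOC_RD_MODE32 take a uint32_t instead and also cover
 * the dual/quad TX/RX flags included in SPI_MODE_MASK above.
 */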
  74
  75struct spidev_data {
  76        dev_t                   devt;
  77        spinlock_t              spi_lock;
  78        struct spi_device       *spi;
  79        struct list_head        device_entry;
  80
  81        /* TX/RX buffers are NULL unless this device is open (users > 0) */
  82        struct mutex            buf_lock;
  83        unsigned                users;
  84        u8                      *tx_buffer;
  85        u8                      *rx_buffer;
  86        u32                     speed_hz;
  87};
  88
  89static LIST_HEAD(device_list);
  90static DEFINE_MUTEX(device_list_lock);
  91
  92static unsigned bufsiz = 4096;
  93module_param(bufsiz, uint, S_IRUGO);
  94MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
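/*
 * Editorial aside: bufsiz caps both read()/write() and the per-direction
 * bounce-buffer totals in spidev_message().  It is settable only at load
 * time (S_IRUGO), e.g. "modprobe spidev bufsiz=65536", or
 * "spidev.bufsiz=65536" on the kernel command line when built in; 65536 is
 * just an example value.
 */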
  95
  96/*-------------------------------------------------------------------------*/
  97
  98/*
  99 * We can't use the standard synchronous wrappers for file I/O; we
 100 * need to protect against async removal of the underlying spi_device.
 101 */
 102static void spidev_complete(void *arg)
 103{
 104        complete(arg);
 105}
 106
 107static ssize_t
 108spidev_sync(struct spidev_data *spidev, struct spi_message *message)
 109{
 110        DECLARE_COMPLETION_ONSTACK(done);
 111        int status;
 112
 113        message->complete = spidev_complete;
 114        message->context = &done;
 115
 116        spin_lock_irq(&spidev->spi_lock);
 117        if (spidev->spi == NULL)
 118                status = -ESHUTDOWN;
 119        else
 120                status = spi_async(spidev->spi, message);
 121        spin_unlock_irq(&spidev->spi_lock);
 122
 123        if (status == 0) {
 124                wait_for_completion(&done);
 125                status = message->status;
 126                if (status == 0)
 127                        status = message->actual_length;
 128        }
 129        return status;
 130}
 131
 132static inline ssize_t
 133spidev_sync_write(struct spidev_data *spidev, size_t len)
 134{
 135        struct spi_transfer     t = {
 136                        .tx_buf         = spidev->tx_buffer,
 137                        .len            = len,
 138                        .speed_hz       = spidev->speed_hz,
 139                };
 140        struct spi_message      m;
 141
 142        spi_message_init(&m);
 143        spi_message_add_tail(&t, &m);
 144        return spidev_sync(spidev, &m);
 145}
 146
 147static inline ssize_t
 148spidev_sync_read(struct spidev_data *spidev, size_t len)
 149{
 150        struct spi_transfer     t = {
 151                        .rx_buf         = spidev->rx_buffer,
 152                        .len            = len,
 153                        .speed_hz       = spidev->speed_hz,
 154                };
 155        struct spi_message      m;
 156
 157        spi_message_init(&m);
 158        spi_message_add_tail(&t, &m);
 159        return spidev_sync(spidev, &m);
 160}
 161
 162/*-------------------------------------------------------------------------*/
 163
 164/* Read-only message with current device setup */
 165static ssize_t
 166spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
 167{
 168        struct spidev_data      *spidev;
 169        ssize_t                 status = 0;
 170
 171        /* chipselect only toggles at start or end of operation */
 172        if (count > bufsiz)
 173                return -EMSGSIZE;
 174
 175        spidev = filp->private_data;
 176
 177        mutex_lock(&spidev->buf_lock);
 178        status = spidev_sync_read(spidev, count);
 179        if (status > 0) {
 180                unsigned long   missing;
 181
 182                missing = copy_to_user(buf, spidev->rx_buffer, status);
 183                if (missing == status)
 184                        status = -EFAULT;
 185                else
 186                        status = status - missing;
 187        }
 188        mutex_unlock(&spidev->buf_lock);
 189
 190        return status;
 191}
 192
 193/* Write-only message with current device setup */
 194static ssize_t
 195spidev_write(struct file *filp, const char __user *buf,
 196                size_t count, loff_t *f_pos)
 197{
 198        struct spidev_data      *spidev;
 199        ssize_t                 status = 0;
 200        unsigned long           missing;
 201
 202        /* chipselect only toggles at start or end of operation */
 203        if (count > bufsiz)
 204                return -EMSGSIZE;
 205
 206        spidev = filp->private_data;
 207
 208        mutex_lock(&spidev->buf_lock);
 209        missing = copy_from_user(spidev->tx_buffer, buf, count);
 210        if (missing == 0)
 211                status = spidev_sync_write(spidev, count);
 212        else
 213                status = -EFAULT;
 214        mutex_unlock(&spidev->buf_lock);
 215
 216        return status;
 217}
 218
 219static int spidev_message(struct spidev_data *spidev,
 220                struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
 221{
 222        struct spi_message      msg;
 223        struct spi_transfer     *k_xfers;
 224        struct spi_transfer     *k_tmp;
 225        struct spi_ioc_transfer *u_tmp;
 226        unsigned                n, total, tx_total, rx_total;
 227        u8                      *tx_buf, *rx_buf;
 228        int                     status = -EFAULT;
 229
 230        spi_message_init(&msg);
 231        k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
 232        if (k_xfers == NULL)
 233                return -ENOMEM;
 234
 235        /* Construct spi_message, copying any tx data to bounce buffer.
 236         * We walk the array of user-provided transfers, using each one
 237         * to initialize a kernel version of the same transfer.
 238         */
 239        tx_buf = spidev->tx_buffer;
 240        rx_buf = spidev->rx_buffer;
 241        total = 0;
 242        tx_total = 0;
 243        rx_total = 0;
 244        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
 245                        n;
 246                        n--, k_tmp++, u_tmp++) {
 247                k_tmp->len = u_tmp->len;
 248
 249                total += k_tmp->len;
 250                /* Since the function returns the total length of transfers
 251                 * on success, restrict the total to positive int values to
 252                 * avoid the return value looking like an error.  Also check
 253                 * each transfer length to avoid arithmetic overflow.
 254                 */
 255                if (total > INT_MAX || k_tmp->len > INT_MAX) {
 256                        status = -EMSGSIZE;
 257                        goto done;
 258                }
 259
 260                if (u_tmp->rx_buf) {
 261                        /* this transfer needs space in RX bounce buffer */
 262                        rx_total += k_tmp->len;
 263                        if (rx_total > bufsiz) {
 264                                status = -EMSGSIZE;
 265                                goto done;
 266                        }
 267                        k_tmp->rx_buf = rx_buf;
 268                        if (!access_ok(VERIFY_WRITE, (u8 __user *)
 269                                                (uintptr_t) u_tmp->rx_buf,
 270                                                u_tmp->len))
 271                                goto done;
 272                        rx_buf += k_tmp->len;
 273                }
 274                if (u_tmp->tx_buf) {
 275                        /* this transfer needs space in TX bounce buffer */
 276                        tx_total += k_tmp->len;
 277                        if (tx_total > bufsiz) {
 278                                status = -EMSGSIZE;
 279                                goto done;
 280                        }
 281                        k_tmp->tx_buf = tx_buf;
 282                        if (copy_from_user(tx_buf, (const u8 __user *)
 283                                                (uintptr_t) u_tmp->tx_buf,
 284                                        u_tmp->len))
 285                                goto done;
 286                        tx_buf += k_tmp->len;
 287                }
 288
 289                k_tmp->cs_change = !!u_tmp->cs_change;
 290                k_tmp->tx_nbits = u_tmp->tx_nbits;
 291                k_tmp->rx_nbits = u_tmp->rx_nbits;
 292                k_tmp->bits_per_word = u_tmp->bits_per_word;
 293                k_tmp->delay_usecs = u_tmp->delay_usecs;
 294                k_tmp->speed_hz = u_tmp->speed_hz;
 295                if (!k_tmp->speed_hz)
 296                        k_tmp->speed_hz = spidev->speed_hz;
 297#ifdef VERBOSE
 298                dev_dbg(&spidev->spi->dev,
 299                        "  xfer len %zd %s%s%s%dbits %u usec %uHz\n",
 300                        u_tmp->len,
 301                        u_tmp->rx_buf ? "rx " : "",
 302                        u_tmp->tx_buf ? "tx " : "",
 303                        u_tmp->cs_change ? "cs " : "",
 304                        u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
 305                        u_tmp->delay_usecs,
 306                        u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
 307#endif
 308                spi_message_add_tail(k_tmp, &msg);
 309        }
 310
 311        status = spidev_sync(spidev, &msg);
 312        if (status < 0)
 313                goto done;
 314
 315        /* copy any rx data out of bounce buffer */
 316        rx_buf = spidev->rx_buffer;
 317        for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
 318                if (u_tmp->rx_buf) {
 319                        if (__copy_to_user((u8 __user *)
 320                                        (uintptr_t) u_tmp->rx_buf, rx_buf,
 321                                        u_tmp->len)) {
 322                                status = -EFAULT;
 323                                goto done;
 324                        }
 325                        rx_buf += u_tmp->len;
 326                }
 327        }
 328        status = total;
 329
 330done:
 331        kfree(k_xfers);
 332        return status;
 333}
 334
 335static struct spi_ioc_transfer *
 336spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
 337                unsigned *n_ioc)
 338{
 339        struct spi_ioc_transfer *ioc;
 340        u32     tmp;
 341
 342        /* Check type, command number and direction */
 343        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
 344                        || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
 345                        || _IOC_DIR(cmd) != _IOC_WRITE)
 346                return ERR_PTR(-ENOTTY);
 347
 348        tmp = _IOC_SIZE(cmd);
 349        if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
 350                return ERR_PTR(-EINVAL);
 351        *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
 352        if (*n_ioc == 0)
 353                return NULL;
 354
 355        /* copy into scratch area */
 356        ioc = kmalloc(tmp, GFP_KERNEL);
 357        if (!ioc)
 358                return ERR_PTR(-ENOMEM);
 359        if (__copy_from_user(ioc, u_ioc, tmp)) {
 360                kfree(ioc);
 361                return ERR_PTR(-EFAULT);
 362        }
 363        return ioc;
 364}
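/*
 * Editorial aside: a hedged userspace sketch of the size-encoded ioctl this
 * helper decodes.  SPI_IOC_MESSAGE(2) encodes two spi_ioc_transfer structs
 * in _IOC_SIZE(cmd), giving n_ioc == 2 below; the command byte and reply
 * length are placeholders.
 *
 *	struct spi_ioc_transfer xfer[2];
 *	uint8_t cmd = 0x03;			// placeholder opcode
 *	uint8_t reply[8];
 *
 *	memset(xfer, 0, sizeof(xfer));
 *	xfer[0].tx_buf = (unsigned long)&cmd;	// first segment: write only
 *	xfer[0].len = sizeof(cmd);
 *	xfer[1].rx_buf = (unsigned long)reply;	// second segment: read only
 *	xfer[1].len = sizeof(reply);
 *
 *	// Chipselect stays asserted across both segments unless cs_change is set.
 *	if (ioctl(fd, SPI_IOC_MESSAGE(2), xfer) < 0)
 *		perror("SPI_IOC_MESSAGE");
 */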
 365
 366static long
 367spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 368{
 369        int                     err = 0;
 370        int                     retval = 0;
 371        struct spidev_data      *spidev;
 372        struct spi_device       *spi;
 373        u32                     tmp;
 374        unsigned                n_ioc;
 375        struct spi_ioc_transfer *ioc;
 376
 377        /* Check type and command number */
 378        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
 379                return -ENOTTY;
 380
 381        /* Check access direction once here; don't repeat below.
 382         * IOC_DIR is from the user perspective, while access_ok is
 383         * from the kernel perspective; so they look reversed.
 384         */
 385        if (_IOC_DIR(cmd) & _IOC_READ)
 386                err = !access_ok(VERIFY_WRITE,
 387                                (void __user *)arg, _IOC_SIZE(cmd));
 388        if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
 389                err = !access_ok(VERIFY_READ,
 390                                (void __user *)arg, _IOC_SIZE(cmd));
 391        if (err)
 392                return -EFAULT;
 393
 394        /* guard against device removal before, or while,
 395         * we issue this ioctl.
 396         */
 397        spidev = filp->private_data;
 398        spin_lock_irq(&spidev->spi_lock);
 399        spi = spi_dev_get(spidev->spi);
 400        spin_unlock_irq(&spidev->spi_lock);
 401
 402        if (spi == NULL)
 403                return -ESHUTDOWN;
 404
 405        /* use the buffer lock here for triple duty:
 406         *  - prevent I/O (from us) so calling spi_setup() is safe;
 407         *  - prevent concurrent SPI_IOC_WR_* from morphing
 408         *    data fields while SPI_IOC_RD_* reads them;
 409         *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
 410         */
 411        mutex_lock(&spidev->buf_lock);
 412
 413        switch (cmd) {
 414        /* read requests */
 415        case SPI_IOC_RD_MODE:
 416                retval = __put_user(spi->mode & SPI_MODE_MASK,
 417                                        (__u8 __user *)arg);
 418                break;
 419        case SPI_IOC_RD_MODE32:
 420                retval = __put_user(spi->mode & SPI_MODE_MASK,
 421                                        (__u32 __user *)arg);
 422                break;
 423        case SPI_IOC_RD_LSB_FIRST:
 424                retval = __put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
 425                                        (__u8 __user *)arg);
 426                break;
 427        case SPI_IOC_RD_BITS_PER_WORD:
 428                retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
 429                break;
 430        case SPI_IOC_RD_MAX_SPEED_HZ:
 431                retval = __put_user(spidev->speed_hz, (__u32 __user *)arg);
 432                break;
 433
 434        /* write requests */
 435        case SPI_IOC_WR_MODE:
 436        case SPI_IOC_WR_MODE32:
 437                if (cmd == SPI_IOC_WR_MODE)
 438                        retval = __get_user(tmp, (u8 __user *)arg);
 439                else
 440                        retval = __get_user(tmp, (u32 __user *)arg);
 441                if (retval == 0) {
 442                        u32     save = spi->mode;
 443
 444                        if (tmp & ~SPI_MODE_MASK) {
 445                                retval = -EINVAL;
 446                                break;
 447                        }
 448
 449                        tmp |= spi->mode & ~SPI_MODE_MASK;
 450                        spi->mode = (u16)tmp;
 451                        retval = spi_setup(spi);
 452                        if (retval < 0)
 453                                spi->mode = save;
 454                        else
 455                                dev_dbg(&spi->dev, "spi mode %x\n", tmp);
 456                }
 457                break;
 458        case SPI_IOC_WR_LSB_FIRST:
 459                retval = __get_user(tmp, (__u8 __user *)arg);
 460                if (retval == 0) {
 461                        u32     save = spi->mode;
 462
 463                        if (tmp)
 464                                spi->mode |= SPI_LSB_FIRST;
 465                        else
 466                                spi->mode &= ~SPI_LSB_FIRST;
 467                        retval = spi_setup(spi);
 468                        if (retval < 0)
 469                                spi->mode = save;
 470                        else
 471                                dev_dbg(&spi->dev, "%csb first\n",
 472                                                tmp ? 'l' : 'm');
 473                }
 474                break;
 475        case SPI_IOC_WR_BITS_PER_WORD:
 476                retval = __get_user(tmp, (__u8 __user *)arg);
 477                if (retval == 0) {
 478                        u8      save = spi->bits_per_word;
 479
 480                        spi->bits_per_word = tmp;
 481                        retval = spi_setup(spi);
 482                        if (retval < 0)
 483                                spi->bits_per_word = save;
 484                        else
 485                                dev_dbg(&spi->dev, "%d bits per word\n", tmp);
 486                }
 487                break;
 488        case SPI_IOC_WR_MAX_SPEED_HZ:
 489                retval = __get_user(tmp, (__u32 __user *)arg);
 490                if (retval == 0) {
 491                        u32     save = spi->max_speed_hz;
 492
 493                        spi->max_speed_hz = tmp;
 494                        retval = spi_setup(spi);
 495                        if (retval >= 0)
 496                                spidev->speed_hz = tmp;
 497                        else
 498                                dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
 499                        spi->max_speed_hz = save;
 500                }
 501                break;
 502
 503        default:
 504                /* segmented and/or full-duplex I/O request */
 505                /* Check message and copy into scratch area */
 506                ioc = spidev_get_ioc_message(cmd,
 507                                (struct spi_ioc_transfer __user *)arg, &n_ioc);
 508                if (IS_ERR(ioc)) {
 509                        retval = PTR_ERR(ioc);
 510                        break;
 511                }
 512                if (!ioc)
 513                        break;  /* n_ioc is also 0 */
 514
 515                /* translate to spi_message, execute */
 516                retval = spidev_message(spidev, ioc, n_ioc);
 517                kfree(ioc);
 518                break;
 519        }
 520
 521        mutex_unlock(&spidev->buf_lock);
 522        spi_dev_put(spi);
 523        return retval;
 524}
 525
 526#ifdef CONFIG_COMPAT
 527static long
 528spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
 529                unsigned long arg)
 530{
 531        struct spi_ioc_transfer __user  *u_ioc;
 532        int                             retval = 0;
 533        struct spidev_data              *spidev;
 534        struct spi_device               *spi;
 535        unsigned                        n_ioc, n;
 536        struct spi_ioc_transfer         *ioc;
 537
 538        u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
 539        if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
 540                return -EFAULT;
 541
 542        /* guard against device removal before, or while,
 543         * we issue this ioctl.
 544         */
 545        spidev = filp->private_data;
 546        spin_lock_irq(&spidev->spi_lock);
 547        spi = spi_dev_get(spidev->spi);
 548        spin_unlock_irq(&spidev->spi_lock);
 549
 550        if (spi == NULL)
 551                return -ESHUTDOWN;
 552
 553        /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
 554        mutex_lock(&spidev->buf_lock);
 555
 556        /* Check message and copy into scratch area */
 557        ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
 558        if (IS_ERR(ioc)) {
 559                retval = PTR_ERR(ioc);
 560                goto done;
 561        }
 562        if (!ioc)
 563                goto done;      /* n_ioc is also 0 */
 564
 565        /* Convert buffer pointers */
 566        for (n = 0; n < n_ioc; n++) {
 567                ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
 568                ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
 569        }
 570
 571        /* translate to spi_message, execute */
 572        retval = spidev_message(spidev, ioc, n_ioc);
 573        kfree(ioc);
 574
 575done:
 576        mutex_unlock(&spidev->buf_lock);
 577        spi_dev_put(spi);
 578        return retval;
 579}
 580
 581static long
 582spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 583{
 584        if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
 585                        && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
 586                        && _IOC_DIR(cmd) == _IOC_WRITE)
 587                return spidev_compat_ioc_message(filp, cmd, arg);
 588
 589        return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
 590}
 591#else
 592#define spidev_compat_ioctl NULL
 593#endif /* CONFIG_COMPAT */
 594
 595static int spidev_open(struct inode *inode, struct file *filp)
 596{
 597        struct spidev_data      *spidev;
 598        int                     status = -ENXIO;
 599
 600        mutex_lock(&device_list_lock);
 601
 602        list_for_each_entry(spidev, &device_list, device_entry) {
 603                if (spidev->devt == inode->i_rdev) {
 604                        status = 0;
 605                        break;
 606                }
 607        }
 608
 609        if (status) {
 610                pr_debug("spidev: nothing for minor %d\n", iminor(inode));
 611                goto err_find_dev;
 612        }
 613
 614        if (!spidev->tx_buffer) {
 615                spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
 616                if (!spidev->tx_buffer) {
  617                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
  618                        status = -ENOMEM;
  619                        goto err_find_dev;
  620                }
  621        }
 622
 623        if (!spidev->rx_buffer) {
 624                spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
 625                if (!spidev->rx_buffer) {
 626                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
 627                        status = -ENOMEM;
 628                        goto err_alloc_rx_buf;
 629                }
 630        }
 631
 632        spidev->users++;
 633        filp->private_data = spidev;
 634        nonseekable_open(inode, filp);
 635
 636        mutex_unlock(&device_list_lock);
 637        return 0;
 638
 639err_alloc_rx_buf:
 640        kfree(spidev->tx_buffer);
 641        spidev->tx_buffer = NULL;
 642err_find_dev:
 643        mutex_unlock(&device_list_lock);
 644        return status;
 645}
 646
 647static int spidev_release(struct inode *inode, struct file *filp)
 648{
 649        struct spidev_data      *spidev;
 650        int                     status = 0;
 651
 652        mutex_lock(&device_list_lock);
 653        spidev = filp->private_data;
 654        filp->private_data = NULL;
 655
 656        /* last close? */
 657        spidev->users--;
 658        if (!spidev->users) {
 659                int             dofree;
 660
 661                kfree(spidev->tx_buffer);
 662                spidev->tx_buffer = NULL;
 663
 664                kfree(spidev->rx_buffer);
 665                spidev->rx_buffer = NULL;
 666
 667                spidev->speed_hz = spidev->spi->max_speed_hz;
 668
 669                /* ... after we unbound from the underlying device? */
 670                spin_lock_irq(&spidev->spi_lock);
 671                dofree = (spidev->spi == NULL);
 672                spin_unlock_irq(&spidev->spi_lock);
 673
 674                if (dofree)
 675                        kfree(spidev);
 676        }
 677        mutex_unlock(&device_list_lock);
 678
 679        return status;
 680}
 681
 682static const struct file_operations spidev_fops = {
 683        .owner =        THIS_MODULE,
 684        /* REVISIT switch to aio primitives, so that userspace
 685         * gets more complete API coverage.  It'll simplify things
 686         * too, except for the locking.
 687         */
 688        .write =        spidev_write,
 689        .read =         spidev_read,
 690        .unlocked_ioctl = spidev_ioctl,
 691        .compat_ioctl = spidev_compat_ioctl,
 692        .open =         spidev_open,
 693        .release =      spidev_release,
 694        .llseek =       no_llseek,
 695};
 696
 697/*-------------------------------------------------------------------------*/
 698
 699/* The main reason to have this class is to make mdev/udev create the
 700 * /dev/spidevB.C character device nodes exposing our userspace API.
 701 * It also simplifies memory management.
 702 */
 703
 704static struct class *spidev_class;
 705
 706#ifdef CONFIG_OF
 707static const struct of_device_id spidev_dt_ids[] = {
 708        { .compatible = "rohm,dh2228fv" },
 709        {},
 710};
 711MODULE_DEVICE_TABLE(of, spidev_dt_ids);
 712#endif
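/*
 * Editorial aside: a hedged devicetree sketch of how a board binds this
 * driver without naming "spidev" directly (which spidev_probe() below warns
 * about).  The controller label, unit address, node name, and clock rate are
 * placeholders:
 *
 *	&spi0 {
 *		device@0 {
 *			compatible = "rohm,dh2228fv";
 *			reg = <0>;
 *			spi-max-frequency = <1000000>;
 *		};
 *	};
 */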
 713
 714/*-------------------------------------------------------------------------*/
 715
 716static int spidev_probe(struct spi_device *spi)
 717{
 718        struct spidev_data      *spidev;
 719        int                     status;
 720        unsigned long           minor;
 721
 722        /*
 723         * spidev should never be referenced in DT without a specific
  724         * compatible string; it is a Linux implementation detail
 725         * rather than a description of the hardware.
 726         */
 727        if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
 728                dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
 729                WARN_ON(spi->dev.of_node &&
 730                        !of_match_device(spidev_dt_ids, &spi->dev));
 731        }
 732
 733        /* Allocate driver data */
 734        spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
 735        if (!spidev)
 736                return -ENOMEM;
 737
 738        /* Initialize the driver data */
 739        spidev->spi = spi;
 740        spin_lock_init(&spidev->spi_lock);
 741        mutex_init(&spidev->buf_lock);
 742
 743        INIT_LIST_HEAD(&spidev->device_entry);
 744
 745        /* If we can allocate a minor number, hook up this device.
 746         * Reusing minors is fine so long as udev or mdev is working.
 747         */
 748        mutex_lock(&device_list_lock);
 749        minor = find_first_zero_bit(minors, N_SPI_MINORS);
 750        if (minor < N_SPI_MINORS) {
 751                struct device *dev;
 752
 753                spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
 754                dev = device_create(spidev_class, &spi->dev, spidev->devt,
 755                                    spidev, "spidev%d.%d",
 756                                    spi->master->bus_num, spi->chip_select);
 757                status = PTR_ERR_OR_ZERO(dev);
 758        } else {
 759                dev_dbg(&spi->dev, "no minor number available!\n");
 760                status = -ENODEV;
 761        }
 762        if (status == 0) {
 763                set_bit(minor, minors);
 764                list_add(&spidev->device_entry, &device_list);
 765        }
 766        mutex_unlock(&device_list_lock);
 767
 768        spidev->speed_hz = spi->max_speed_hz;
 769
 770        if (status == 0)
 771                spi_set_drvdata(spi, spidev);
 772        else
 773                kfree(spidev);
 774
 775        return status;
 776}
 777
 778static int spidev_remove(struct spi_device *spi)
 779{
 780        struct spidev_data      *spidev = spi_get_drvdata(spi);
 781
 782        /* make sure ops on existing fds can abort cleanly */
 783        spin_lock_irq(&spidev->spi_lock);
 784        spidev->spi = NULL;
 785        spin_unlock_irq(&spidev->spi_lock);
 786
 787        /* prevent new opens */
 788        mutex_lock(&device_list_lock);
 789        list_del(&spidev->device_entry);
 790        device_destroy(spidev_class, spidev->devt);
 791        clear_bit(MINOR(spidev->devt), minors);
 792        if (spidev->users == 0)
 793                kfree(spidev);
 794        mutex_unlock(&device_list_lock);
 795
 796        return 0;
 797}
 798
 799static struct spi_driver spidev_spi_driver = {
 800        .driver = {
 801                .name =         "spidev",
 802                .owner =        THIS_MODULE,
 803                .of_match_table = of_match_ptr(spidev_dt_ids),
 804        },
 805        .probe =        spidev_probe,
 806        .remove =       spidev_remove,
 807
 808        /* NOTE:  suspend/resume methods are not necessary here.
 809         * We don't do anything except pass the requests to/from
 810         * the underlying controller.  The refrigerator handles
 811         * most issues; the controller driver handles the rest.
 812         */
 813};
 814
 815/*-------------------------------------------------------------------------*/
 816
 817static int __init spidev_init(void)
 818{
 819        int status;
 820
 821        /* Claim our 256 reserved device numbers.  Then register a class
 822         * that will key udev/mdev to add/remove /dev nodes.  Last, register
 823         * the driver which manages those device numbers.
 824         */
 825        BUILD_BUG_ON(N_SPI_MINORS > 256);
 826        status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
 827        if (status < 0)
 828                return status;
 829
 830        spidev_class = class_create(THIS_MODULE, "spidev");
 831        if (IS_ERR(spidev_class)) {
 832                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
 833                return PTR_ERR(spidev_class);
 834        }
 835
 836        status = spi_register_driver(&spidev_spi_driver);
 837        if (status < 0) {
 838                class_destroy(spidev_class);
 839                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
 840        }
 841        return status;
 842}
 843module_init(spidev_init);
 844
 845static void __exit spidev_exit(void)
 846{
 847        spi_unregister_driver(&spidev_spi_driver);
 848        class_destroy(spidev_class);
 849        unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
 850}
 851module_exit(spidev_exit);
 852
 853MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
 854MODULE_DESCRIPTION("User mode SPI device interface");
 855MODULE_LICENSE("GPL");
 856MODULE_ALIAS("spi:spidev");
 857