linux/drivers/platform/goldfish/goldfish_pipe.c
/*
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 * Copyright (C) 2011-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *    int  fd = open("/dev/qemu_pipe",O_RDWR);
 *    .... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *    // do this _just_ after opening the fd to connect to a specific
 *    // emulator service.
 *    const char*  msg = "<pipename>";
 *    if (write(fd, msg, strlen(msg)+1) < 0) {
 *       ... could not connect to <pipename> service
 *       close(fd);
 *    }
 *
 *    // after this, simply read() and write() to communicate with the
 *    // service. Exact protocol details left as an exercise to the reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note that we must however ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
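
/*
 * For illustration only, a slightly fuller (still simplified) userspace
 * client might look like the sketch below. The device node name follows
 * the usage comment above; depending on the userspace setup, the misc
 * device registered by this driver may instead appear as
 * /dev/goldfish_pipe. EINTR retries and partial-write handling are
 * deliberately omitted.
 *
 *    #include <fcntl.h>
 *    #include <string.h>
 *    #include <unistd.h>
 *
 *    static int open_pipe_service(const char *name)
 *    {
 *            int fd = open("/dev/qemu_pipe", O_RDWR);
 *
 *            if (fd < 0)
 *                    return -1;
 *            // The service name, including its terminating NUL, must be
 *            // the first data written into the freshly opened pipe.
 *            if (write(fd, name, strlen(name) + 1) < 0) {
 *                    close(fd);
 *                    return -1;
 *            }
 *            return fd;  // read()/write() now speak the service protocol
 *    }
 */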


#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/goldfish.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>

/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it
 */
enum {
        PIPE_DRIVER_VERSION = 2,
        PIPE_CURRENT_DEVICE_VERSION = 2
};

/*
 * IMPORTANT: The following constants must match the ones used and defined
 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
 */

/* List of bitflags returned in status of CMD_POLL command */
enum PipePollFlags {
        PIPE_POLL_IN    = 1 << 0,
        PIPE_POLL_OUT   = 1 << 1,
        PIPE_POLL_HUP   = 1 << 2
};

/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
enum PipeErrors {
        PIPE_ERROR_INVAL  = -1,
        PIPE_ERROR_AGAIN  = -2,
        PIPE_ERROR_NOMEM  = -3,
        PIPE_ERROR_IO     = -4
};

/* Bit-flags used to signal events from the emulator */
enum PipeWakeFlags {
        PIPE_WAKE_CLOSED = 1 << 0,  /* emulator closed pipe */
        PIPE_WAKE_READ   = 1 << 1,  /* pipe can now be read from */
        PIPE_WAKE_WRITE  = 1 << 2   /* pipe can now be written to */
};

/* Bit flags for the 'flags' field */
enum PipeFlagsBits {
        BIT_CLOSED_ON_HOST = 0,  /* pipe closed by host */
        BIT_WAKE_ON_WRITE  = 1,  /* want to be woken on writes */
        BIT_WAKE_ON_READ   = 2,  /* want to be woken on reads */
};

enum PipeRegs {
        PIPE_REG_CMD = 0,

        PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
        PIPE_REG_SIGNAL_BUFFER = 8,
        PIPE_REG_SIGNAL_BUFFER_COUNT = 12,

        PIPE_REG_OPEN_BUFFER_HIGH = 20,
        PIPE_REG_OPEN_BUFFER = 24,

        PIPE_REG_VERSION = 36,

        PIPE_REG_GET_SIGNALLED = 48,
};

enum PipeCmdCode {
        PIPE_CMD_OPEN = 1,      /* to be used by the pipe device itself */
        PIPE_CMD_CLOSE,
        PIPE_CMD_POLL,
        PIPE_CMD_WRITE,
        PIPE_CMD_WAKE_ON_WRITE,
        PIPE_CMD_READ,
        PIPE_CMD_WAKE_ON_READ,

        /*
         * TODO(zyy): implement a deferred read/write execution to allow
         * parallel processing of pipe operations on the host.
         */
        PIPE_CMD_WAKE_ON_DONE_IO,
};

enum {
        MAX_BUFFERS_PER_COMMAND = 336,
        MAX_SIGNALLED_PIPES = 64,
        INITIAL_PIPES_CAPACITY = 64
};

struct goldfish_pipe_dev;
struct goldfish_pipe;
struct goldfish_pipe_command;

/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
        s32 cmd;                /* PipeCmdCode, guest -> host */
        s32 id;                 /* pipe id, guest -> host */
        s32 status;             /* command execution status, host -> guest */
        s32 reserved;           /* to pad to 64-bit boundary */
        union {
                /* Parameters for PIPE_CMD_{READ,WRITE} */
                struct {
                        /* number of buffers, guest -> host */
                        u32 buffers_count;
                        /* number of consumed bytes, host -> guest */
                        s32 consumed_size;
                        /* buffer pointers, guest -> host */
                        u64 ptrs[MAX_BUFFERS_PER_COMMAND];
                        /* buffer sizes, guest -> host */
                        u32 sizes[MAX_BUFFERS_PER_COMMAND];
                } rw_params;
        };
};
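
/*
 * A rough size check for the structure above (assuming 4 KiB pages and
 * natural alignment): 4 s32 header fields (16 bytes) + buffers_count and
 * consumed_size (8 bytes) + 336 u64 pointers (2688 bytes) + 336 u32 sizes
 * (1344 bytes) = 4056 bytes. With MAX_BUFFERS_PER_COMMAND = 336 the whole
 * command therefore fits in a single page - see the WARN_ON() check in
 * goldfish_pipe_probe().
 */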

/* Information about a single signalled pipe */
struct signalled_pipe_buffer {
        u32 id;
        u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
        u64 command_buffer_ptr;
        u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
        struct open_command_param open_command_params;
        struct signalled_pipe_buffer signalled_pipe_buffers[
                MAX_SIGNALLED_PIPES];
};

/* This data type models a given pipe instance */
struct goldfish_pipe {
        /* pipe ID - index into goldfish_pipe_dev::pipes array */
        u32 id;
        /* The wake flags this pipe is waiting for
         * Note: not protected with any lock, uses atomic operations
         *  and barriers to make it thread-safe.
         */
        unsigned long flags;
        /* wake flags the host has signalled,
         *  - protected by goldfish_pipe_dev::lock
         */
        unsigned long signalled_flags;

        /* A pointer to the command buffer */
        struct goldfish_pipe_command *command_buffer;

        /* doubly linked list of signalled pipes, protected by
         * goldfish_pipe_dev::lock
         */
        struct goldfish_pipe *prev_signalled;
        struct goldfish_pipe *next_signalled;

        /*
         * A pipe's own lock. Protects the following:
         *  - *command_buffer - makes sure a command can safely write its
         *    parameters to the host and read the results back.
         */
        struct mutex lock;

        /* A wake queue for sleeping until host signals an event */
        wait_queue_head_t wake_queue;
        /* Pointer to the parent goldfish_pipe_dev instance */
        struct goldfish_pipe_dev *dev;

        /*
         * Pages pinned for the current transfer. Kept here (rather than in
         * a file-scope 'static' array) because the array is too large for
         * the stack and a single shared array would race between pipes;
         * protected by this pipe's lock.
         */
        struct page *pages[MAX_BUFFERS_PER_COMMAND];
};

/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
        /*
         * Global device spinlock. Protects the following members:
         *  - pipes, pipes_capacity
         *  - [*pipes, *pipes + pipes_capacity) - array data
         *  - first_signalled_pipe,
         *      goldfish_pipe::prev_signalled,
         *      goldfish_pipe::next_signalled,
         *      goldfish_pipe::signalled_flags - all signalled-related fields,
         *                                       in all allocated pipes
         *  - open_command_params - PIPE_CMD_OPEN-related buffers
         *
         * It looks like a lot of different fields, but the trick is that
         * the only operation that happens often is the signalled pipes array
         * manipulation. That's why it's OK for now to keep the rest of the
         * fields under the same lock. If we notice too much contention because
         * of PIPE_CMD_OPEN, then we should add a separate lock there.
         */
        spinlock_t lock;

        /*
         * Array of the pipes of |pipes_capacity| elements,
         * indexed by goldfish_pipe::id
         */
        struct goldfish_pipe **pipes;
        u32 pipes_capacity;

        /* Pointers to the buffers the host uses to interact with this driver */
        struct goldfish_pipe_dev_buffers *buffers;

        /* Head of a doubly linked list of signalled pipes */
        struct goldfish_pipe *first_signalled_pipe;

        /* Some device-specific data */
        int irq;
        int version;
        unsigned char __iomem *base;
};

static struct goldfish_pipe_dev pipe_dev[1] = {};

static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
        pipe->command_buffer->cmd = cmd;
        /* failure by default */
        pipe->command_buffer->status = PIPE_ERROR_INVAL;
        writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
        return pipe->command_buffer->status;
}

static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
        int status;

        if (mutex_lock_interruptible(&pipe->lock))
                return PIPE_ERROR_IO;
        status = goldfish_cmd_locked(pipe, cmd);
        mutex_unlock(&pipe->lock);
        return status;
}

/*
 * This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
        switch (status) {
        case PIPE_ERROR_AGAIN:
                return -EAGAIN;
        case PIPE_ERROR_NOMEM:
                return -ENOMEM;
        case PIPE_ERROR_IO:
                return -EIO;
        default:
                return -EINVAL;
        }
}

static int pin_user_pages(unsigned long first_page, unsigned long last_page,
        unsigned int last_page_size, int is_write,
        struct page *pages[MAX_BUFFERS_PER_COMMAND],
        unsigned int *iter_last_page_size)
{
        int ret;
        int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

        if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
                requested_pages = MAX_BUFFERS_PER_COMMAND;
                *iter_last_page_size = PAGE_SIZE;
        } else {
                *iter_last_page_size = last_page_size;
        }

        ret = get_user_pages_fast(
                        first_page, requested_pages, !is_write, pages);
        if (ret <= 0)
                return -EFAULT;
        if (ret < requested_pages)
                *iter_last_page_size = PAGE_SIZE;
        return ret;
}
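
/*
 * Worked example (hypothetical values, 4 KiB pages): for first_page
 * 0x10000000 and last_page 0x10002000, requested_pages is
 * ((0x2000 >> 12) + 1) = 3. If get_user_pages_fast() pins fewer pages
 * than requested, the transfer is silently truncated at a page boundary:
 * the last pinned page is then used in full (*iter_last_page_size becomes
 * PAGE_SIZE) and the caller picks up the remainder on its next loop
 * iteration in goldfish_pipe_read_write().
 */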

static void release_user_pages(struct page **pages, int pages_count,
        int is_write, s32 consumed_size)
{
        int i;

        for (i = 0; i < pages_count; i++) {
                if (!is_write && consumed_size > 0)
                        set_page_dirty(pages[i]);
                put_page(pages[i]);
        }
}

/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(
        struct page **pages, int pages_count,
        unsigned long address, unsigned long address_end,
        unsigned long first_page, unsigned long last_page,
        unsigned int iter_last_page_size, int is_write,
        struct goldfish_pipe_command *command)
{
        /*
         * Process the first page separately - it's the only page that
         * needs special handling for its start address.
         */
        unsigned long xaddr = page_to_phys(pages[0]);
        unsigned long xaddr_prev = xaddr;
        int buffer_idx = 0;
        int i = 1;
        int size_on_page = first_page == last_page
                        ? (int)(address_end - address)
                        : (PAGE_SIZE - (address & ~PAGE_MASK));
        command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
        command->rw_params.sizes[0] = size_on_page;
        for (; i < pages_count; ++i) {
                xaddr = page_to_phys(pages[i]);
                size_on_page = (i == pages_count - 1) ?
                        iter_last_page_size : PAGE_SIZE;
                if (xaddr == xaddr_prev + PAGE_SIZE) {
                        command->rw_params.sizes[buffer_idx] += size_on_page;
                } else {
                        ++buffer_idx;
                        command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
                        command->rw_params.sizes[buffer_idx] = size_on_page;
                }
                xaddr_prev = xaddr;
        }
        command->rw_params.buffers_count = buffer_idx + 1;
}
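
/*
 * Worked example of the merging above (hypothetical addresses, 4 KiB
 * pages): a transfer of 0x2800 bytes starting at user address 0x10000100
 * spans three pages. If they were pinned to physical pages 0x5000, 0x6000
 * and 0x9000, the first two are physically adjacent and get merged:
 *
 *    ptrs[0] = 0x5100, sizes[0] = 0xf00 + 0x1000 = 0x1f00
 *    ptrs[1] = 0x9000, sizes[1] = 0x900  (iter_last_page_size)
 *
 * so the host sees two contiguous buffers instead of three pages.
 */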

static int transfer_max_buffers(struct goldfish_pipe *pipe,
        unsigned long address, unsigned long address_end, int is_write,
        unsigned long last_page, unsigned int last_page_size,
        s32 *consumed_size, int *status)
{
        unsigned long first_page = address & PAGE_MASK;
        unsigned int iter_last_page_size;
        int pages_count;

        /* Serialize access to the pipe command buffer and pages array */
        if (mutex_lock_interruptible(&pipe->lock))
                return -ERESTARTSYS;

        /*
         * Pin the pages only after taking the lock: pipe->pages is shared
         * by all transfers on this pipe (a 'static' array here would even
         * be shared between pipes and would race).
         */
        pages_count = pin_user_pages(first_page, last_page,
                        last_page_size, is_write,
                        pipe->pages, &iter_last_page_size);
        if (pages_count < 0) {
                mutex_unlock(&pipe->lock);
                return pages_count;
        }

        populate_rw_params(pipe->pages, pages_count, address, address_end,
                first_page, last_page, iter_last_page_size, is_write,
                pipe->command_buffer);

        /* Transfer the data */
        *status = goldfish_cmd_locked(pipe,
                                is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

        *consumed_size = pipe->command_buffer->rw_params.consumed_size;

        release_user_pages(pipe->pages, pages_count, is_write,
                        *consumed_size);

        mutex_unlock(&pipe->lock);

        return 0;
}

static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
        u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;

        set_bit(wakeBit, &pipe->flags);

        /* Tell the emulator we're going to wait for a wake event */
        (void)goldfish_cmd(pipe,
                is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);

        while (test_bit(wakeBit, &pipe->flags)) {
                if (wait_event_interruptible(
                                pipe->wake_queue,
                                !test_bit(wakeBit, &pipe->flags)))
                        return -ERESTARTSYS;

                if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
                        return -EIO;
        }

        return 0;
}

static ssize_t goldfish_pipe_read_write(struct file *filp,
        char __user *buffer, size_t bufflen, int is_write)
{
        struct goldfish_pipe *pipe = filp->private_data;
        int count = 0, ret = -EINVAL;
        unsigned long address, address_end, last_page;
        unsigned int last_page_size;

        /* If the emulator already closed the pipe, no need to go further */
        if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
                return -EIO;
        /* Null reads or writes succeed */
        if (unlikely(bufflen == 0))
                return 0;
        /* Check the buffer range for access */
        if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
                        buffer, bufflen)))
                return -EFAULT;

        address = (unsigned long)buffer;
        address_end = address + bufflen;
        last_page = (address_end - 1) & PAGE_MASK;
        last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;

        while (address < address_end) {
                s32 consumed_size;
                int status;

                ret = transfer_max_buffers(pipe, address, address_end,
                                is_write, last_page, last_page_size,
                                &consumed_size, &status);
                if (ret < 0)
                        break;

                if (consumed_size > 0) {
                        /* No matter what the status is, we've transferred
                         * something.
                         */
                        count += consumed_size;
                        address += consumed_size;
                }
                if (status > 0)
                        continue;
                if (status == 0) {
                        /* EOF */
                        ret = 0;
                        break;
                }
                if (count > 0) {
                        /*
                         * An error occurred, but we already transferred
                         * something on one of the previous iterations.
                         * Just return what we already copied and log this
                         * error.
                         */
                        if (status != PIPE_ERROR_AGAIN)
                                pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
                                        status, is_write ? "write" : "read");
                        break;
                }

                /*
                 * If the error is not PIPE_ERROR_AGAIN, or if we are in
                 * non-blocking mode, just return the error code.
                 */
                if (status != PIPE_ERROR_AGAIN ||
                        (filp->f_flags & O_NONBLOCK) != 0) {
                        ret = goldfish_pipe_error_convert(status);
                        break;
                }

                status = wait_for_host_signal(pipe, is_write);
                if (status < 0)
                        return status;
        }

        if (count > 0)
                return count;
        return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
                                size_t bufflen, loff_t *ppos)
{
        return goldfish_pipe_read_write(filp, buffer, bufflen,
                        /* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
                                const char __user *buffer, size_t bufflen,
                                loff_t *ppos)
{
        return goldfish_pipe_read_write(filp,
                        /* cast away the const */(char __user *)buffer, bufflen,
                        /* is_write */ 1);
}

static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
        struct goldfish_pipe *pipe = filp->private_data;
        __poll_t mask = 0;
        int status;

        poll_wait(filp, &pipe->wake_queue, wait);

        status = goldfish_cmd(pipe, PIPE_CMD_POLL);
        if (status < 0)
                return EPOLLERR;        /* a __poll_t must be a mask, not a
                                         * negative errno
                                         */

        if (status & PIPE_POLL_IN)
                mask |= EPOLLIN | EPOLLRDNORM;
        if (status & PIPE_POLL_OUT)
                mask |= EPOLLOUT | EPOLLWRNORM;
        if (status & PIPE_POLL_HUP)
                mask |= EPOLLHUP;
        if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
                mask |= EPOLLERR;

        return mask;
}
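
/*
 * From userspace the pipe behaves like any other pollable fd; a minimal
 * sketch (assuming fd was opened and connected as in the header comment):
 *
 *    struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *    if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN)) {
 *            // PIPE_CMD_POLL reported PIPE_POLL_IN: read() won't block
 *    }
 *    if (pfd.revents & (POLLHUP | POLLERR)) {
 *            // host hung up (PIPE_POLL_HUP) or closed the pipe
 *    }
 */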

static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
        u32 id, u32 flags)
{
        struct goldfish_pipe *pipe;

        if (WARN_ON(id >= dev->pipes_capacity))
                return;

        pipe = dev->pipes[id];
        if (!pipe)
                return;
        pipe->signalled_flags |= flags;

        if (pipe->prev_signalled || pipe->next_signalled
                || dev->first_signalled_pipe == pipe)
                return; /* already in the list */
        pipe->next_signalled = dev->first_signalled_pipe;
        if (dev->first_signalled_pipe)
                dev->first_signalled_pipe->prev_signalled = pipe;
        dev->first_signalled_pipe = pipe;
}

static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
        struct goldfish_pipe *pipe)
{
        if (pipe->prev_signalled)
                pipe->prev_signalled->next_signalled = pipe->next_signalled;
        if (pipe->next_signalled)
                pipe->next_signalled->prev_signalled = pipe->prev_signalled;
        if (pipe == dev->first_signalled_pipe)
                dev->first_signalled_pipe = pipe->next_signalled;
        pipe->prev_signalled = NULL;
        pipe->next_signalled = NULL;
}

static struct goldfish_pipe *signalled_pipes_pop_front(
                struct goldfish_pipe_dev *dev, int *wakes)
{
        struct goldfish_pipe *pipe;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);

        pipe = dev->first_signalled_pipe;
        if (pipe) {
                *wakes = pipe->signalled_flags;
                pipe->signalled_flags = 0;
                /*
                 * This is an optimized version of
                 * signalled_pipes_remove_locked(): we know the pipe is at
                 * the head of the list, and we want to wake the sleeping
                 * pipe operations as fast as possible.
                 */
                dev->first_signalled_pipe = pipe->next_signalled;
                if (dev->first_signalled_pipe)
                        dev->first_signalled_pipe->prev_signalled = NULL;
                pipe->next_signalled = NULL;
        }

        spin_unlock_irqrestore(&dev->lock, flags);
        return pipe;
}

static void goldfish_interrupt_task(unsigned long unused)
{
        struct goldfish_pipe_dev *dev = pipe_dev;
        /* Iterate over the signalled pipes and wake them one by one */
        struct goldfish_pipe *pipe;
        int wakes;

        while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
                if (wakes & PIPE_WAKE_CLOSED) {
                        pipe->flags = 1 << BIT_CLOSED_ON_HOST;
                } else {
                        if (wakes & PIPE_WAKE_READ)
                                clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
                        if (wakes & PIPE_WAKE_WRITE)
                                clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
                }
                /*
                 * wake_up_interruptible() implies a write barrier, so don't
                 * explicitly add another one here.
                 */
                wake_up_interruptible(&pipe->wake_queue);
        }
}
DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);

/*
 * The general idea of the interrupt handling:
 *
 *  1. device raises an interrupt if there's at least one signalled pipe
 *  2. IRQ handler reads the signalled pipes and their count from the device
 *  3. device writes them into a shared buffer and returns the count
 *      it only resets the IRQ if it has returned all signalled pipes,
 *      otherwise it leaves it raised, so IRQ handler will be called
 *      again for the next chunk
 *  4. IRQ handler adds all returned pipes to the device's signalled pipes list
 *  5. IRQ handler launches a tasklet to process the signalled pipes from the
 *      list in a separate context
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
        u32 count;
        u32 i;
        unsigned long flags;
        struct goldfish_pipe_dev *dev = dev_id;

        if (dev != pipe_dev)
                return IRQ_NONE;

        /* Request the signalled pipes from the device */
        spin_lock_irqsave(&dev->lock, flags);

        count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
        if (count == 0) {
                spin_unlock_irqrestore(&dev->lock, flags);
                return IRQ_NONE;
        }
        if (count > MAX_SIGNALLED_PIPES)
                count = MAX_SIGNALLED_PIPES;

        for (i = 0; i < count; ++i)
                signalled_pipes_add_locked(dev,
                        dev->buffers->signalled_pipe_buffers[i].id,
                        dev->buffers->signalled_pipe_buffers[i].flags);

        spin_unlock_irqrestore(&dev->lock, flags);

        tasklet_schedule(&goldfish_interrupt_tasklet);
        return IRQ_HANDLED;
}

static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
        int id;

        for (id = 0; id < dev->pipes_capacity; ++id)
                if (!dev->pipes[id])
                        return id;

        {
                /* Reallocate the array */
                u32 new_capacity = 2 * dev->pipes_capacity;
                struct goldfish_pipe **pipes =
                        kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
                if (!pipes)
                        return -ENOMEM;
                memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
                kfree(dev->pipes);
                dev->pipes = pipes;
                id = dev->pipes_capacity;
                dev->pipes_capacity = new_capacity;
        }
        return id;
}

/**
 *      goldfish_pipe_open - open a channel to the AVD
 *      @inode: inode of device
 *      @file: file struct of opener
 *
 *      Create a new pipe link between the emulator and the user application.
 *      Each new request produces a new pipe.
 *
 *      Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 *      right now so this is fine. A move to 64bit will need to revisit
 *      this addressing.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
        struct goldfish_pipe_dev *dev = pipe_dev;
        unsigned long flags;
        int id;
        int status;

        /* Allocate new pipe kernel object */
        struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

        if (!pipe)
                return -ENOMEM;

        pipe->dev = dev;
        mutex_init(&pipe->lock);
        init_waitqueue_head(&pipe->wake_queue);

        /*
         * Command buffer needs to be allocated on its own page to make sure
         * it is physically contiguous in host's address space.
         */
        pipe->command_buffer =
                (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
        if (!pipe->command_buffer) {
                status = -ENOMEM;
                goto err_pipe;
        }

        spin_lock_irqsave(&dev->lock, flags);

        id = get_free_pipe_id_locked(dev);
        if (id < 0) {
                status = id;
                goto err_id_locked;
        }

        dev->pipes[id] = pipe;
        pipe->id = id;
        pipe->command_buffer->id = id;

        /* Now tell the emulator we're opening a new pipe. */
        dev->buffers->open_command_params.rw_params_max_count =
                        MAX_BUFFERS_PER_COMMAND;
        dev->buffers->open_command_params.command_buffer_ptr =
                        (u64)(unsigned long)__pa(pipe->command_buffer);
        status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
        spin_unlock_irqrestore(&dev->lock, flags);
        if (status < 0)
                goto err_cmd;
        /* All is done, save the pipe into the file's private data field */
        file->private_data = pipe;
        return 0;

err_cmd:
        spin_lock_irqsave(&dev->lock, flags);
        dev->pipes[id] = NULL;
err_id_locked:
        spin_unlock_irqrestore(&dev->lock, flags);
        free_page((unsigned long)pipe->command_buffer);
err_pipe:
        kfree(pipe);
        return status;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
        unsigned long flags;
        struct goldfish_pipe *pipe = filp->private_data;
        struct goldfish_pipe_dev *dev = pipe->dev;

        /* The guest is closing the channel, so tell the emulator right now */
        (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);

        spin_lock_irqsave(&dev->lock, flags);
        dev->pipes[pipe->id] = NULL;
        signalled_pipes_remove_locked(dev, pipe);
        spin_unlock_irqrestore(&dev->lock, flags);

        filp->private_data = NULL;
        free_page((unsigned long)pipe->command_buffer);
        kfree(pipe);
        return 0;
}

static const struct file_operations goldfish_pipe_fops = {
        .owner = THIS_MODULE,
        .read = goldfish_pipe_read,
        .write = goldfish_pipe_write,
        .poll = goldfish_pipe_poll,
        .open = goldfish_pipe_open,
        .release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "goldfish_pipe",
        .fops = &goldfish_pipe_fops,
};

static int goldfish_pipe_device_init(struct platform_device *pdev)
{
        char *page;
        struct goldfish_pipe_dev *dev = pipe_dev;
        int err = devm_request_irq(&pdev->dev, dev->irq,
                                goldfish_pipe_interrupt,
                                IRQF_SHARED, "goldfish_pipe", dev);
        if (err) {
                dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
                return err;
        }

        err = misc_register(&goldfish_pipe_dev);
        if (err) {
                dev_err(&pdev->dev, "unable to register v2 device\n");
                return err;
        }

        dev->first_signalled_pipe = NULL;
        dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
        dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
                                        GFP_KERNEL);
        if (!dev->pipes)
                return -ENOMEM;

        /*
         * We're going to pass two buffers, open_command_params and
         * signalled_pipe_buffers, to the host. This means each of those buffers
         * needs to be contained in a single physical page. The easiest choice
         * is to just allocate a page and place the buffers in it.
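         * (Rough size check, assuming 8-byte alignment: 16 bytes of
         * open_command_params plus 64 * 8 = 512 bytes of signalled pipe
         * buffers is about 528 bytes in total, well under a 4 KiB page.)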
         */
        if (WARN_ON(sizeof(*dev->buffers) > PAGE_SIZE))
                return -ENOMEM;

        page = (char *)__get_free_page(GFP_KERNEL);
        if (!page) {
                kfree(dev->pipes);
                return -ENOMEM;
        }
        dev->buffers = (struct goldfish_pipe_dev_buffers *)page;

        /* Send the buffer addresses to the host */
        {
                u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);

                writel((u32)(unsigned long)(paddr >> 32),
                        dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
                writel((u32)(unsigned long)paddr,
                        dev->base + PIPE_REG_SIGNAL_BUFFER);
                writel((u32)MAX_SIGNALLED_PIPES,
                        dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

                paddr = __pa(&dev->buffers->open_command_params);
                writel((u32)(unsigned long)(paddr >> 32),
                        dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
                writel((u32)(unsigned long)paddr,
                        dev->base + PIPE_REG_OPEN_BUFFER);
        }
        return 0;
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev)
{
        struct goldfish_pipe_dev *dev = pipe_dev;

        misc_deregister(&goldfish_pipe_dev);
        kfree(dev->pipes);
        free_page((unsigned long)dev->buffers);
}

static int goldfish_pipe_probe(struct platform_device *pdev)
{
        int err;
        struct resource *r;
        struct goldfish_pipe_dev *dev = pipe_dev;

        if (WARN_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE))
                return -ENOMEM;

        /* not thread safe, but this should not happen */
        WARN_ON(dev->base != NULL);

        spin_lock_init(&dev->lock);

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (r == NULL || resource_size(r) < PAGE_SIZE) {
                dev_err(&pdev->dev, "can't allocate i/o page\n");
                return -EINVAL;
        }
        dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
        if (dev->base == NULL) {
                dev_err(&pdev->dev, "ioremap failed\n");
                return -EINVAL;
        }

        r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (r == NULL) {
                err = -EINVAL;
                goto error;
        }
        dev->irq = r->start;

        /*
         * Exchange the versions with the host device
         *
         * Note: v1 driver used to not report its version, so we write it before
         *  reading device version back: this allows the host implementation to
         *  detect the old driver (if there was no version write before read).
         */
        writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
        dev->version = readl(dev->base + PIPE_REG_VERSION);
        if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION)) {
                /* Leave through the error path so dev->base is reset */
                err = -EINVAL;
                goto error;
        }

        err = goldfish_pipe_device_init(pdev);
        if (!err)
                return 0;

error:
        dev->base = NULL;
        return err;
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
        struct goldfish_pipe_dev *dev = pipe_dev;

        goldfish_pipe_device_deinit(pdev);
        dev->base = NULL;
        return 0;
}

static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
        { "GFSH0003", 0 },
        { },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);

static const struct of_device_id goldfish_pipe_of_match[] = {
        { .compatible = "google,android-pipe", },
        {},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);

static struct platform_driver goldfish_pipe_driver = {
        .probe = goldfish_pipe_probe,
        .remove = goldfish_pipe_remove,
        .driver = {
                .name = "goldfish_pipe",
                .of_match_table = goldfish_pipe_of_match,
                .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
        }
};

module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");