linux/drivers/firewire/core-cdev.c
/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION                  4
#define FW_CDEV_VERSION_EVENT_REQUEST2          4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END     4

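/*
 * A minimal user-space sketch of the ABI handshake, for orientation only
 * (not part of the driver): the client reports the ABI version it speaks
 * and learns the kernel's version in return.  The device path and error
 * handling are illustrative assumptions.
 *
 *        struct fw_cdev_get_info info = {};
 *        int fd = open("/dev/fw1", O_RDWR);      // hypothetical node
 *
 *        info.version = 4;                       // ABI version the app implements
 *        if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) == 0)
 *                printf("kernel ABI %u, card %u\n", info.version, info.card);
 */
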
struct client {
        u32 version;
        struct fw_device *device;

        spinlock_t lock;
        bool in_shutdown;
        struct idr resource_idr;
        struct list_head event_list;
        wait_queue_head_t wait;
        wait_queue_head_t tx_flush_wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;

        struct list_head phy_receiver_link;
        u64 phy_receiver_closure;

        struct list_head link;
        struct kref kref;
};

static inline void client_get(struct client *client)
{
        kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
        struct client *client = container_of(kref, struct client, kref);

        fw_device_put(client->device);
        kfree(client);
}

static void client_put(struct client *client)
{
        kref_put(&client->kref, client_release);
}

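/*
 * Client lifetime (as the call sites below suggest): in addition to the
 * open file itself, a reference is held for each entry in resource_idr
 * and for each queued iso_resource work item, so client_release() cannot
 * run while any of those are still outstanding.
 */
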
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
                                             struct client_resource *);
struct client_resource {
        client_resource_release_fn_t release;
        int handle;
};

struct address_handler_resource {
        struct client_resource resource;
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
};

struct outbound_transaction_resource {
        struct client_resource resource;
        struct fw_transaction transaction;
};

struct inbound_transaction_resource {
        struct client_resource resource;
        struct fw_card *card;
        struct fw_request *request;
        void *data;
        size_t length;
};

struct descriptor_resource {
        struct client_resource resource;
        struct fw_descriptor descriptor;
        u32 data[0];
};

struct iso_resource {
        struct client_resource resource;
        struct client *client;
        /* Schedule work and access todo only with client->lock held. */
        struct delayed_work work;
        enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
              ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
        int generation;
        u64 channels;
        s32 bandwidth;
        struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
        client_get(r->client);
        if (!queue_delayed_work(fw_workqueue, &r->work, delay))
                client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
        if (resource->release == release_iso_resource)
                schedule_iso_resource(container_of(resource,
                                        struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
        struct event event;
        struct client *client;
        struct outbound_transaction_resource r;
        struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
        struct event event;
        union {
                struct fw_cdev_event_request request;
                struct fw_cdev_event_request2 request2;
        } req;
};

struct iso_interrupt_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
        struct event event;
        struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
        struct event event;
        struct client *client;
        struct fw_packet p;
        struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
        struct event event;
        struct fw_cdev_event_phy_packet phy_packet;
};

static inline void __user *u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}

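/*
 * The cdev ABI carries user pointers in __u64 fields so that 32-bit and
 * 64-bit user space share one layout.  A hedged user-space sketch of the
 * inverse conversion (the buffer name is illustrative):
 *
 *        struct fw_cdev_send_request req = {};
 *        __u32 payload[2];
 *
 *        req.data = (__u64)(uintptr_t)payload;   // pointer -> __u64
 */
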
static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        idr_init(&client->resource_idr);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        init_waitqueue_head(&client->tx_flush_wait);
        INIT_LIST_HEAD(&client->phy_receiver_link);
        kref_init(&client->kref);

        file->private_data = client;

        mutex_lock(&device->client_list_mutex);
        list_add_tail(&client->link, &device->client_list);
        mutex_unlock(&device->client_list_mutex);

        return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                kfree(event);
        else
                list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
                         char __user *buffer, size_t count)
{
        struct event *event;
        size_t size, total;
        int i, ret;

        ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (ret < 0)
                return ret;

        if (list_empty(&client->event_list) &&
                       fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irq(&client->lock);
        event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
        spin_unlock_irq(&client->lock);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
        ret = total;

 out:
        kfree(event);

        return ret;
}

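/*
 * Each read() returns at most one event, assembled from up to two scatter
 * pieces (the header struct plus an optional payload).  A hedged user-space
 * consumer sketch, assuming a buffer large enough for the biggest event the
 * application expects:
 *
 *        union fw_cdev_event *ev;
 *        char buf[16 * 1024];
 *        ssize_t len = read(fd, buf, sizeof(buf));
 *
 *        if (len > 0) {
 *                ev = (union fw_cdev_event *)buf;
 *                switch (ev->common.type) {
 *                case FW_CDEV_EVENT_BUS_RESET:
 *                        // ev->bus_reset ...
 *                        break;
 *                }
 *        }
 */
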
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
{
        struct fw_card *card = client->device->card;

        spin_lock_irq(&card->lock);

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->generation    = client->device->generation;
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = card->bm_node_id;
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;

        spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
{
        struct client *c;

        mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
        mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
        schedule_if_iso_resource(p);

        return 0;
}

static void queue_bus_reset_event(struct client *client)
{
        struct bus_reset_event *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }

        fill_bus_reset_event(&e->reset, client);

        queue_event(client, &e->event,
                    &e->reset, sizeof(e->reset), NULL, 0);

        spin_lock_irq(&client->lock);
        idr_for_each(&client->resource_idr, schedule_reallocations, client);
        spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}

union ioctl_arg {
        struct fw_cdev_get_info                 get_info;
        struct fw_cdev_send_request             send_request;
        struct fw_cdev_allocate                 allocate;
        struct fw_cdev_deallocate               deallocate;
        struct fw_cdev_send_response            send_response;
        struct fw_cdev_initiate_bus_reset       initiate_bus_reset;
        struct fw_cdev_add_descriptor           add_descriptor;
        struct fw_cdev_remove_descriptor        remove_descriptor;
        struct fw_cdev_create_iso_context       create_iso_context;
        struct fw_cdev_queue_iso                queue_iso;
        struct fw_cdev_start_iso                start_iso;
        struct fw_cdev_stop_iso                 stop_iso;
        struct fw_cdev_get_cycle_timer          get_cycle_timer;
        struct fw_cdev_allocate_iso_resource    allocate_iso_resource;
        struct fw_cdev_send_stream_packet       send_stream_packet;
        struct fw_cdev_get_cycle_timer2         get_cycle_timer2;
        struct fw_cdev_send_phy_packet          send_phy_packet;
        struct fw_cdev_receive_phy_packets      receive_phy_packets;
        struct fw_cdev_set_iso_channels         set_iso_channels;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_info *a = &arg->get_info;
        struct fw_cdev_event_bus_reset bus_reset;
        unsigned long ret = 0;

        client->version = a->version;
        a->version = FW_CDEV_KERNEL_VERSION;
        a->card = client->device->card->index;

        down_read(&fw_device_rwsem);

        if (a->rom != 0) {
                size_t want = a->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(u64_to_uptr(a->rom),
                                   client->device->config_rom, min(want, have));
        }
        a->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        client->bus_reset_closure = a->bus_reset_closure;
        if (a->bus_reset != 0) {
                fill_bus_reset_event(&bus_reset, client);
                if (copy_to_user(u64_to_uptr(a->bus_reset),
                                 &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }

        return 0;
}

static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
{
        unsigned long flags;
        int ret;

 retry:
        if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
                return -ENOMEM;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                ret = -ECANCELED;
        else
                ret = idr_get_new(&client->resource_idr, resource,
                                  &resource->handle);
        if (ret >= 0) {
                client_get(client);
                schedule_if_iso_resource(resource);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        if (ret == -EAGAIN)
                goto retry;

        return ret < 0 ? ret : 0;
}

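/*
 * The retry loop above follows the classic idr_pre_get()/idr_get_new()
 * protocol: preallocation happens outside the spinlock, and idr_get_new()
 * returns -EAGAIN when another path consumed the preallocated node, in
 * which case we simply preallocate and try again.
 */
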
static int release_client_resource(struct client *client, u32 handle,
                                   client_resource_release_fn_t release,
                                   struct client_resource **return_resource)
{
        struct client_resource *resource;

        spin_lock_irq(&client->lock);
        if (client->in_shutdown)
                resource = NULL;
        else
                resource = idr_find(&client->resource_idr, handle);
        if (resource && resource->release == release)
                idr_remove(&client->resource_idr, handle);
        spin_unlock_irq(&client->lock);

        if (!(resource && resource->release == release))
                return -EINVAL;

        if (return_resource)
                *return_resource = resource;
        else
                resource->release(client, resource);

        client_put(client);

        return 0;
}

static void release_transaction(struct client *client,
                                struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct outbound_transaction_event *e = data;
        struct fw_cdev_event_response *rsp = &e->response;
        struct client *client = e->client;
        unsigned long flags;

        if (length < rsp->length)
                rsp->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(rsp->data, payload, rsp->length);

        spin_lock_irqsave(&client->lock, flags);
        idr_remove(&client->resource_idr, e->r.resource.handle);
        if (client->in_shutdown)
                wake_up(&client->tx_flush_wait);
        spin_unlock_irqrestore(&client->lock, flags);

        rsp->type = FW_CDEV_EVENT_RESPONSE;
        rsp->rcode = rcode;

        /*
         * In the case that sizeof(*rsp) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
                queue_event(client, &e->event, rsp, sizeof(*rsp),
                            rsp->data, rsp->length);
        else
                queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
                            NULL, 0);

        /* Drop the idr's reference */
        client_put(client);
}

static int init_request(struct client *client,
                        struct fw_cdev_send_request *request,
                        int destination_id, int speed)
{
        struct outbound_transaction_event *e;
        int ret;

        if (request->tcode != TCODE_STREAM_DATA &&
            (request->length > 4096 || request->length > 512 << speed))
                return -EIO;

        if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
            request->length < 4)
                return -EINVAL;

        e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        e->client = client;
        e->response.length = request->length;
        e->response.closure = request->closure;

        if (request->data &&
            copy_from_user(e->response.data,
                           u64_to_uptr(request->data), request->length)) {
                ret = -EFAULT;
                goto failed;
        }

        e->r.resource.release = release_transaction;
        ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
        if (ret < 0)
                goto failed;

        fw_send_request(client->device->card, &e->r.transaction,
                        request->tcode, destination_id, request->generation,
                        speed, request->offset, e->response.data,
                        request->length, complete_transaction, e);
        return 0;

 failed:
        kfree(e);

        return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
        switch (arg->send_request.tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_MASK_SWAP:
        case TCODE_LOCK_COMPARE_SWAP:
        case TCODE_LOCK_FETCH_ADD:
        case TCODE_LOCK_LITTLE_ADD:
        case TCODE_LOCK_BOUNDED_ADD:
        case TCODE_LOCK_WRAP_ADD:
        case TCODE_LOCK_VENDOR_DEPENDENT:
                break;
        default:
                return -EINVAL;
        }

        return init_request(client, &arg->send_request, client->device->node_id,
                            client->device->max_speed);
}

static inline bool is_fcp_request(struct fw_request *request)
{
        return request == NULL;
}

static void release_request(struct client *client,
                            struct client_resource *resource)
{
        struct inbound_transaction_resource *r = container_of(resource,
                        struct inbound_transaction_resource, resource);

        if (is_fcp_request(r->request))
                kfree(r->data);
        else
                fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

        fw_card_put(r->card);
        kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
                           int tcode, int destination, int source,
                           int generation, unsigned long long offset,
                           void *payload, size_t length, void *callback_data)
{
        struct address_handler_resource *handler = callback_data;
        struct inbound_transaction_resource *r;
        struct inbound_transaction_event *e;
        size_t event_size0;
        void *fcp_frame = NULL;
        int ret;

        /* card may be different from handler->client->device->card */
        fw_card_get(card);

        r = kmalloc(sizeof(*r), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (r == NULL || e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                goto failed;
        }
        r->card    = card;
        r->request = request;
        r->data    = payload;
        r->length  = length;

        if (is_fcp_request(request)) {
                /*
                 * FIXME: Let core-transaction.c manage a
                 * single reference-counted copy?
                 */
                fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
                if (fcp_frame == NULL)
                        goto failed;

                r->data = fcp_frame;
        }

        r->resource.release = release_request;
        ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
        if (ret < 0)
                goto failed;

        if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
                struct fw_cdev_event_request *req = &e->req.request;

                if (tcode & 0x10)
                        tcode = TCODE_LOCK_REQUEST;

                req->type       = FW_CDEV_EVENT_REQUEST;
                req->tcode      = tcode;
                req->offset     = offset;
                req->length     = length;
                req->handle     = r->resource.handle;
                req->closure    = handler->closure;
                event_size0     = sizeof(*req);
        } else {
                struct fw_cdev_event_request2 *req = &e->req.request2;

                req->type       = FW_CDEV_EVENT_REQUEST2;
                req->tcode      = tcode;
                req->offset     = offset;
                req->source_node_id = source;
                req->destination_node_id = destination;
                req->card       = card->index;
                req->generation = generation;
                req->length     = length;
                req->handle     = r->resource.handle;
                req->closure    = handler->closure;
                event_size0     = sizeof(*req);
        }

        queue_event(handler->client, &e->event,
                    &e->req, event_size0, r->data, length);
        return;

 failed:
        kfree(r);
        kfree(e);
        kfree(fcp_frame);

        if (!is_fcp_request(request))
                fw_send_response(card, request, RCODE_CONFLICT_ERROR);

        fw_card_put(card);
}

static void release_address_handler(struct client *client,
                                    struct client_resource *resource)
{
        struct address_handler_resource *r =
            container_of(resource, struct address_handler_resource, resource);

        fw_core_remove_address_handler(&r->handler);
        kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_allocate *a = &arg->allocate;
        struct address_handler_resource *r;
        struct fw_address_region region;
        int ret;

        r = kmalloc(sizeof(*r), GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        region.start = a->offset;
        if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
                region.end = a->offset + a->length;
        else
                region.end = a->region_end;

        r->handler.length           = a->length;
        r->handler.address_callback = handle_request;
        r->handler.callback_data    = r;
        r->closure   = a->closure;
        r->client    = client;

        ret = fw_core_add_address_handler(&r->handler, &region);
        if (ret < 0) {
                kfree(r);
                return ret;
        }
        a->offset = r->handler.offset;

        r->resource.release = release_address_handler;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                release_address_handler(client, &r->resource);
                return ret;
        }
        a->handle = r->resource.handle;

        return 0;
}

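/*
 * A hedged user-space sketch of claiming an address range with the
 * version-4 region_end semantics (the offsets are illustrative):
 *
 *        struct fw_cdev_allocate alloc = {};
 *
 *        alloc.offset     = 0xffff00000000ULL;   // preferred start
 *        alloc.region_end = 0xffff00001000ULL;   // end of search region
 *        alloc.length     = 0x100;               // CSR length in bytes
 *        alloc.closure    = (__u64)(uintptr_t)my_ctx;  // hypothetical cookie
 *        if (ioctl(fd, FW_CDEV_IOC_ALLOCATE, &alloc) == 0)
 *                // alloc.offset now holds the actually allocated start
 *                ;
 */
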
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->deallocate.handle,
                                       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_response *a = &arg->send_response;
        struct client_resource *resource;
        struct inbound_transaction_resource *r;
        int ret = 0;

        if (release_client_resource(client, a->handle,
                                    release_request, &resource) < 0)
                return -EINVAL;

        r = container_of(resource, struct inbound_transaction_resource,
                         resource);
        if (is_fcp_request(r->request))
                goto out;

        if (a->length != fw_get_response_length(r->request)) {
                ret = -EINVAL;
                kfree(r->request);
                goto out;
        }
        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
                ret = -EFAULT;
                kfree(r->request);
                goto out;
        }
        fw_send_response(r->card, r->request, a->rcode);
 out:
        fw_card_put(r->card);
        kfree(r);

        return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
        fw_schedule_bus_reset(client->device->card, true,
                        arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
        return 0;
}

static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor_resource *r =
                container_of(resource, struct descriptor_resource, resource);

        fw_core_remove_descriptor(&r->descriptor);
        kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
        struct descriptor_resource *r;
        int ret;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        if (a->length > 256)
                return -EINVAL;

        r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
                ret = -EFAULT;
                goto failed;
        }

        r->descriptor.length    = a->length;
        r->descriptor.immediate = a->immediate;
        r->descriptor.key       = a->key;
        r->descriptor.data      = r->data;

        ret = fw_core_add_descriptor(&r->descriptor);
        if (ret < 0)
                goto failed;

        r->resource.release = release_descriptor;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                fw_core_remove_descriptor(&r->descriptor);
                goto failed;
        }
        a->handle = r->resource.handle;

        return 0;
 failed:
        kfree(r);

        return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->remove_descriptor.handle,
                                       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
                         size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt_event *e;

        e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }
        e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
        e->interrupt.closure   = client->iso_closure;
        e->interrupt.cycle     = cycle;
        e->interrupt.header_length = header_length;
        memcpy(e->interrupt.header, header, header_length);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
                            dma_addr_t completed, void *data)
{
        struct client *client = data;
        struct iso_interrupt_mc_event *e;

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }
        e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
        e->interrupt.closure   = client->iso_closure;
        e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
                                                      completed);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt), NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
        struct fw_iso_context *context;
        fw_iso_callback_t cb;

        BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
                                        FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

        switch (a->type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                if (a->speed > SCODE_3200 || a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE:
                if (a->header_size < 4 || (a->header_size & 3) ||
                    a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                cb = (fw_iso_callback_t)iso_mc_callback;
                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card, a->type,
                        a->channel, a->speed, a->header_size, cb, client);
        if (IS_ERR(context))
                return PTR_ERR(context);

        /* We only support one context at this time. */
        spin_lock_irq(&client->lock);
        if (client->iso_context != NULL) {
                spin_unlock_irq(&client->lock);
                fw_iso_context_destroy(context);
                return -EBUSY;
        }
        client->iso_closure = a->closure;
        client->iso_context = context;
        spin_unlock_irq(&client->lock);

        a->handle = 0;

        return 0;
}

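/*
 * A hedged user-space sketch of creating an isochronous receive context
 * (the channel and header_size values are illustrative assumptions; speed
 * is validated only for transmit contexts above):
 *
 *        struct fw_cdev_create_iso_context ctx = {};
 *
 *        ctx.type        = FW_CDEV_ISO_CONTEXT_RECEIVE;
 *        ctx.channel     = 5;
 *        ctx.header_size = 4;    // per packet; multiple of 4, at least 4
 *        if (ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &ctx) == -1 &&
 *            errno == EBUSY)
 *                // only one context per open file descriptor is supported
 *                ;
 */
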
static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
        struct fw_iso_context *ctx = client->iso_context;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)   ((v) & 0xffff)
#define GET_INTERRUPT(v)        (((v) >> 16) & 0x01)
#define GET_SKIP(v)             (((v) >> 17) & 0x01)
#define GET_TAG(v)              (((v) >> 18) & 0x03)
#define GET_SY(v)               (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)    (((v) >> 24) & 0xff)

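/*
 * These decode the first quadlet of struct fw_cdev_iso_packet.  User space
 * packs the same bits, e.g. with the FW_CDEV_ISO_* helper macros from
 * linux/firewire-cdev.h; a 64-byte payload with an interrupt on completion
 * would look like (sketch):
 *
 *        p.control = FW_CDEV_ISO_PAYLOAD_LENGTH(64) | FW_CDEV_ISO_INTERRUPT;
 */
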
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_queue_iso *a = &arg->queue_iso;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, transmit_header_bytes = 0;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we set up the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the a->data pointer is ignored.
         */
        payload = (unsigned long)a->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (a->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
                return -EINVAL;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
        if (!access_ok(VERIFY_READ, p, a->size))
                return -EFAULT;

        end = (void __user *)p + a->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                switch (ctx->type) {
                case FW_ISO_CONTEXT_TRANSMIT:
                        if (u.packet.header_length & 3)
                                return -EINVAL;
                        transmit_header_bytes = u.packet.header_length;
                        break;

                case FW_ISO_CONTEXT_RECEIVE:
                        if (u.packet.header_length == 0 ||
                            u.packet.header_length % ctx->header_size != 0)
                                return -EINVAL;
                        break;

                case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                        if (u.packet.payload_length == 0 ||
                            u.packet.payload_length & 3)
                                return -EINVAL;
                        break;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[transmit_header_bytes / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, transmit_header_bytes))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }
        fw_iso_context_queue_flush(ctx);

        a->size    -= uptr_to_u64(p) - a->packets;
        a->packets  = uptr_to_u64(p);
        a->data     = client->vm_start + payload;

        return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_start_iso *a = &arg->start_iso;

        BUILD_BUG_ON(
            FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
            FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
            (a->tags == 0 || a->tags > 15 || a->sync > 15))
                return -EINVAL;

        return fw_iso_context_start(client->iso_context,
                                    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_stop_iso *a = &arg->stop_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
        struct fw_card *card = client->device->card;
        struct timespec ts = {0, 0};
        u32 cycle_time;
        int ret = 0;

        local_irq_disable();

        cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

        switch (a->clk_id) {
        case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
        case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
        case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
        default:
                ret = -EINVAL;
        }

        local_irq_enable();

        a->tv_sec      = ts.tv_sec;
        a->tv_nsec     = ts.tv_nsec;
        a->cycle_timer = cycle_time;

        return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
        struct fw_cdev_get_cycle_timer2 ct2;

        ct2.clk_id = CLOCK_REALTIME;
        ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

        a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
        a->cycle_timer = ct2.cycle_timer;

        return 0;
}

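/*
 * A hedged user-space sketch of sampling the cycle timer together with a
 * monotonic system clock:
 *
 *        struct fw_cdev_get_cycle_timer2 ct = {};
 *
 *        ct.clk_id = CLOCK_MONOTONIC;
 *        if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER2, &ct) == 0)
 *                // ct.cycle_timer holds the CYCLE_TIME register value,
 *                // ct.tv_sec/ct.tv_nsec the matching system time
 *                ;
 */
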
1194static void iso_resource_work(struct work_struct *work)
1195{
1196        struct iso_resource_event *e;
1197        struct iso_resource *r =
1198                        container_of(work, struct iso_resource, work.work);
1199        struct client *client = r->client;
1200        int generation, channel, bandwidth, todo;
1201        bool skip, free, success;
1202
1203        spin_lock_irq(&client->lock);
1204        generation = client->device->generation;
1205        todo = r->todo;
1206        /* Allow 1000ms grace period for other reallocations. */
1207        if (todo == ISO_RES_ALLOC &&
1208            time_before64(get_jiffies_64(),
1209                          client->device->card->reset_jiffies + HZ)) {
1210                schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
1211                skip = true;
1212        } else {
1213                /* We could be called twice within the same generation. */
1214                skip = todo == ISO_RES_REALLOC &&
1215                       r->generation == generation;
1216        }
1217        free = todo == ISO_RES_DEALLOC ||
1218               todo == ISO_RES_ALLOC_ONCE ||
1219               todo == ISO_RES_DEALLOC_ONCE;
1220        r->generation = generation;
1221        spin_unlock_irq(&client->lock);
1222
1223        if (skip)
1224                goto out;
1225
1226        bandwidth = r->bandwidth;
1227
1228        fw_iso_resource_manage(client->device->card, generation,
1229                        r->channels, &channel, &bandwidth,
1230                        todo == ISO_RES_ALLOC ||
1231                        todo == ISO_RES_REALLOC ||
1232                        todo == ISO_RES_ALLOC_ONCE);
1233        /*
1234         * Is this generation outdated already?  As long as this resource sticks
1235         * in the idr, it will be scheduled again for a newer generation or at
1236         * shutdown.
1237         */
1238        if (channel == -EAGAIN &&
1239            (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1240                goto out;
1241
1242        success = channel >= 0 || bandwidth > 0;
1243
1244        spin_lock_irq(&client->lock);
1245        /*
1246         * Transit from allocation to reallocation, except if the client
1247         * requested deallocation in the meantime.
1248         */
1249        if (r->todo == ISO_RES_ALLOC)
1250                r->todo = ISO_RES_REALLOC;
1251        /*
1252         * Allocation or reallocation failure?  Pull this resource out of the
1253         * idr and prepare for deletion, unless the client is shutting down.
1254         */
1255        if (r->todo == ISO_RES_REALLOC && !success &&
1256            !client->in_shutdown &&
1257            idr_find(&client->resource_idr, r->resource.handle)) {
1258                idr_remove(&client->resource_idr, r->resource.handle);
1259                client_put(client);
1260                free = true;
1261        }
1262        spin_unlock_irq(&client->lock);
1263
1264        if (todo == ISO_RES_ALLOC && channel >= 0)
1265                r->channels = 1ULL << channel;
1266
1267        if (todo == ISO_RES_REALLOC && success)
1268                goto out;
1269
1270        if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1271                e = r->e_alloc;
1272                r->e_alloc = NULL;
1273        } else {
1274                e = r->e_dealloc;
1275                r->e_dealloc = NULL;
1276        }
1277        e->iso_resource.handle    = r->resource.handle;
1278        e->iso_resource.channel   = channel;
1279        e->iso_resource.bandwidth = bandwidth;
1280
1281        queue_event(client, &e->event,
1282                    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);
1283
1284        if (free) {
1285                cancel_delayed_work(&r->work);
1286                kfree(r->e_alloc);
1287                kfree(r->e_dealloc);
1288                kfree(r);
1289        }
1290 out:
1291        client_put(client);
1292}
1293
1294static void release_iso_resource(struct client *client,
1295                                 struct client_resource *resource)
1296{
1297        struct iso_resource *r =
1298                container_of(resource, struct iso_resource, resource);
1299
1300        spin_lock_irq(&client->lock);
1301        r->todo = ISO_RES_DEALLOC;
1302        schedule_iso_resource(r, 0);
1303        spin_unlock_irq(&client->lock);
1304}
1305
1306static int init_iso_resource(struct client *client,
1307                struct fw_cdev_allocate_iso_resource *request, int todo)
1308{
1309        struct iso_resource_event *e1, *e2;
1310        struct iso_resource *r;
1311        int ret;
1312
1313        if ((request->channels == 0 && request->bandwidth == 0) ||
1314            request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
1315            request->bandwidth < 0)
1316                return -EINVAL;
1317
1318        r  = kmalloc(sizeof(*r), GFP_KERNEL);
1319        e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1320        e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1321        if (r == NULL || e1 == NULL || e2 == NULL) {
1322                ret = -ENOMEM;
1323                goto fail;
1324        }
1325
1326        INIT_DELAYED_WORK(&r->work, iso_resource_work);
1327        r->client       = client;
1328        r->todo         = todo;
1329        r->generation   = -1;
1330        r->channels     = request->channels;
1331        r->bandwidth    = request->bandwidth;
1332        r->e_alloc      = e1;
1333        r->e_dealloc    = e2;
1334
1335        e1->iso_resource.closure = request->closure;
1336        e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1337        e2->iso_resource.closure = request->closure;
1338        e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1339
1340        if (todo == ISO_RES_ALLOC) {
1341                r->resource.release = release_iso_resource;
1342                ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1343                if (ret < 0)
1344                        goto fail;
1345        } else {
1346                r->resource.release = NULL;
1347                r->resource.handle = -1;
1348                schedule_iso_resource(r, 0);
1349        }
1350        request->handle = r->resource.handle;
1351
1352        return 0;
1353 fail:
1354        kfree(r);
1355        kfree(e1);
1356        kfree(e2);
1357
1358        return ret;
1359}
1360
1361static int ioctl_allocate_iso_resource(struct client *client,
1362                                       union ioctl_arg *arg)
1363{
1364        return init_iso_resource(client,
1365                        &arg->allocate_iso_resource, ISO_RES_ALLOC);
1366}
1367
1368static int ioctl_deallocate_iso_resource(struct client *client,
1369                                         union ioctl_arg *arg)
1370{
1371        return release_client_resource(client,
1372                        arg->deallocate.handle, release_iso_resource, NULL);
1373}
1374
1375static int ioctl_allocate_iso_resource_once(struct client *client,
1376                                            union ioctl_arg *arg)
1377{
1378        return init_iso_resource(client,
1379                        &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1380}
1381
1382static int ioctl_deallocate_iso_resource_once(struct client *client,
1383                                              union ioctl_arg *arg)
1384{
1385        return init_iso_resource(client,
1386                        &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1387}
1388
1389/*
1390 * Returns a speed code:  Maximum speed to or from this device,
1391 * limited by the device's link speed, the local node's link speed,
1392 * and all PHY port speeds between the two links.
1393 */
1394static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1395{
1396        return client->device->max_speed;
1397}
1398
1399static int ioctl_send_broadcast_request(struct client *client,
1400                                        union ioctl_arg *arg)
1401{
1402        struct fw_cdev_send_request *a = &arg->send_request;
1403
1404        switch (a->tcode) {
1405        case TCODE_WRITE_QUADLET_REQUEST:
1406        case TCODE_WRITE_BLOCK_REQUEST:
1407                break;
1408        default:
1409                return -EINVAL;
1410        }
1411
1412        /* Security policy: Only allow accesses to Units Space. */
1413        if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1414                return -EACCES;
1415
1416        return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1417}
1418
1419static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1420{
1421        struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1422        struct fw_cdev_send_request request;
1423        int dest;
1424
1425        if (a->speed > client->device->card->link_speed ||
1426            a->length > 1024 << a->speed)
1427                return -EIO;
1428
1429        if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1430                return -EINVAL;
1431
1432        dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1433        request.tcode           = TCODE_STREAM_DATA;
1434        request.length          = a->length;
1435        request.closure         = a->closure;
1436        request.data            = a->data;
1437        request.generation      = a->generation;
1438
1439        return init_request(client, &request, dest, a->speed);
1440}
1441
1442static void outbound_phy_packet_callback(struct fw_packet *packet,
1443                                         struct fw_card *card, int status)
1444{
1445        struct outbound_phy_packet_event *e =
1446                container_of(packet, struct outbound_phy_packet_event, p);
1447
1448        switch (status) {
1449        /* expected: */
1450        case ACK_COMPLETE:      e->phy_packet.rcode = RCODE_COMPLETE;   break;
1451        /* should never happen with PHY packets: */
1452        case ACK_PENDING:       e->phy_packet.rcode = RCODE_COMPLETE;   break;
1453        case ACK_BUSY_X:
1454        case ACK_BUSY_A:
1455        case ACK_BUSY_B:        e->phy_packet.rcode = RCODE_BUSY;       break;
1456        case ACK_DATA_ERROR:    e->phy_packet.rcode = RCODE_DATA_ERROR; break;
1457        case ACK_TYPE_ERROR:    e->phy_packet.rcode = RCODE_TYPE_ERROR; break;
1458        /* stale generation; cancelled; on certain controllers: no ack */
1459        default:                e->phy_packet.rcode = status;           break;
1460        }
1461        e->phy_packet.data[0] = packet->timestamp;
1462
1463        queue_event(e->client, &e->event, &e->phy_packet,
1464                    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
1465        client_put(e->client);
1466}
1467
1468static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
1469{
1470        struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
1471        struct fw_card *card = client->device->card;
1472        struct outbound_phy_packet_event *e;
1473
1474        /* Access policy: Allow this ioctl only on local nodes' device files. */
1475        if (!client->device->is_local)
1476                return -ENOSYS;
1477
1478        e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
1479        if (e == NULL)
1480                return -ENOMEM;
1481
1482        client_get(client);
1483        e->client               = client;
1484        e->p.speed              = SCODE_100;
1485        e->p.generation         = a->generation;
1486        e->p.header[0]          = TCODE_LINK_INTERNAL << 4;
1487        e->p.header[1]          = a->data[0];
1488        e->p.header[2]          = a->data[1];
1489        e->p.header_length      = 12;
1490        e->p.callback           = outbound_phy_packet_callback;
1491        e->phy_packet.closure   = a->closure;
1492        e->phy_packet.type      = FW_CDEV_EVENT_PHY_PACKET_SENT;
1493        if (is_ping_packet(a->data))
1494                        e->phy_packet.length = 4;
1495
1496        card->driver->send_request(card, &e->p);
1497
1498        return 0;
1499}
1500
1501static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
1502{
1503        struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
1504        struct fw_card *card = client->device->card;
1505
1506        /* Access policy: Allow this ioctl only on local nodes' device files. */
1507        if (!client->device->is_local)
1508                return -ENOSYS;
1509
1510        spin_lock_irq(&card->lock);
1511
1512        list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1513        client->phy_receiver_closure = a->closure;
1514
1515        spin_unlock_irq(&card->lock);
1516
1517        return 0;
1518}
1519
1520void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
1521{
1522        struct client *client;
1523        struct inbound_phy_packet_event *e;
1524        unsigned long flags;
1525
1526        spin_lock_irqsave(&card->lock, flags);
1527
1528        list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
1529                e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
1530                if (e == NULL) {
1531                        fw_notify("Out of memory when allocating event\n");
1532                        break;
1533                }
1534                e->phy_packet.closure   = client->phy_receiver_closure;
1535                e->phy_packet.type      = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
1536                e->phy_packet.rcode     = RCODE_COMPLETE;
1537                e->phy_packet.length    = 8;
1538                e->phy_packet.data[0]   = p->header[1];
1539                e->phy_packet.data[1]   = p->header[2];
1540                queue_event(client, &e->event,
1541                            &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
1542        }
1543
1544        spin_unlock_irqrestore(&card->lock, flags);
1545}
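/*
 * Example (user space, illustrative sketch, not part of the driver):
 * decoding the event queued above.  length is always 8 here: the two header
 * quadlets of the PHY packet; dump_phy_packet() is a hypothetical helper.
 */
#if 0
#include <stdio.h>
#include <linux/firewire-cdev.h>

static void dump_phy_packet(const struct fw_cdev_event_phy_packet *e)
{
        if (e->type == FW_CDEV_EVENT_PHY_PACKET_RECEIVED && e->length == 8)
                printf("phy packet: %08x %08x\n", e->data[0], e->data[1]);
}
#endif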
1546
1547static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1548        [0x00] = ioctl_get_info,
1549        [0x01] = ioctl_send_request,
1550        [0x02] = ioctl_allocate,
1551        [0x03] = ioctl_deallocate,
1552        [0x04] = ioctl_send_response,
1553        [0x05] = ioctl_initiate_bus_reset,
1554        [0x06] = ioctl_add_descriptor,
1555        [0x07] = ioctl_remove_descriptor,
1556        [0x08] = ioctl_create_iso_context,
1557        [0x09] = ioctl_queue_iso,
1558        [0x0a] = ioctl_start_iso,
1559        [0x0b] = ioctl_stop_iso,
1560        [0x0c] = ioctl_get_cycle_timer,
1561        [0x0d] = ioctl_allocate_iso_resource,
1562        [0x0e] = ioctl_deallocate_iso_resource,
1563        [0x0f] = ioctl_allocate_iso_resource_once,
1564        [0x10] = ioctl_deallocate_iso_resource_once,
1565        [0x11] = ioctl_get_speed,
1566        [0x12] = ioctl_send_broadcast_request,
1567        [0x13] = ioctl_send_stream_packet,
1568        [0x14] = ioctl_get_cycle_timer2,
1569        [0x15] = ioctl_send_phy_packet,
1570        [0x16] = ioctl_receive_phy_packets,
1571        [0x17] = ioctl_set_iso_channels,
1572};
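/*
 * Example (user space, illustrative sketch, not part of the driver): the
 * table above is indexed by the _IOC_NR field of the command word, with '#'
 * as the ioctl type.  E.g. linux/firewire-cdev.h defines
 * FW_CDEV_IOC_SEND_PHY_PACKET as _IOWR('#', 0x15, ...), matching [0x15].
 */
#if 0
#include <assert.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
        assert(_IOC_TYPE(FW_CDEV_IOC_SEND_PHY_PACKET) == '#');
        assert(_IOC_NR(FW_CDEV_IOC_SEND_PHY_PACKET) == 0x15);
        return 0;
}
#endif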
1573
1574static int dispatch_ioctl(struct client *client,
1575                          unsigned int cmd, void __user *arg)
1576{
1577        union ioctl_arg buffer;
1578        int ret;
1579
1580        if (fw_device_is_shutdown(client->device))
1581                return -ENODEV;
1582
1583        if (_IOC_TYPE(cmd) != '#' ||
1584            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1585            _IOC_SIZE(cmd) > sizeof(buffer))
1586                return -ENOTTY;
1587
1588        if (_IOC_DIR(cmd) == _IOC_READ)
1589                memset(&buffer, 0, _IOC_SIZE(cmd));
1590
1591        if (_IOC_DIR(cmd) & _IOC_WRITE)
1592                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1593                        return -EFAULT;
1594
1595        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1596        if (ret < 0)
1597                return ret;
1598
1599        if (_IOC_DIR(cmd) & _IOC_READ)
1600                if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1601                        return -EFAULT;
1602
1603        return ret;
1604}
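/*
 * Example (user space, illustrative sketch, not part of the driver): a pure
 * _IOC_READ command such as FW_CDEV_IOC_GET_CYCLE_TIMER takes the memset()
 * path above, so stale kernel stack bytes are never copied out;
 * read_cycle_timer() is a hypothetical helper name.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int read_cycle_timer(int fd, __u64 *local_time, __u32 *cycle_timer)
{
        struct fw_cdev_get_cycle_timer ctr;

        if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ctr) < 0)
                return -1;
        *local_time  = ctr.local_time;
        *cycle_timer = ctr.cycle_timer;
        return 0;
}
#endif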
1605
1606static long fw_device_op_ioctl(struct file *file,
1607                               unsigned int cmd, unsigned long arg)
1608{
1609        return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1610}
1611
1612#ifdef CONFIG_COMPAT
1613static long fw_device_op_compat_ioctl(struct file *file,
1614                                      unsigned int cmd, unsigned long arg)
1615{
1616        return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
1617}
1618#endif
1619
1620static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
1621{
1622        struct client *client = file->private_data;
1623        enum dma_data_direction direction;
1624        unsigned long size;
1625        int page_count, ret;
1626
1627        if (fw_device_is_shutdown(client->device))
1628                return -ENODEV;
1629
1630        /* FIXME: We could support multiple buffers, but we don't. */
1631        if (client->buffer.pages != NULL)
1632                return -EBUSY;
1633
1634        if (!(vma->vm_flags & VM_SHARED))
1635                return -EINVAL;
1636
1637        if (vma->vm_start & ~PAGE_MASK)
1638                return -EINVAL;
1639
1640        client->vm_start = vma->vm_start;
1641        size = vma->vm_end - vma->vm_start;
1642        page_count = size >> PAGE_SHIFT;
1643        if (size & ~PAGE_MASK)
1644                return -EINVAL;
1645
1646        if (vma->vm_flags & VM_WRITE)
1647                direction = DMA_TO_DEVICE;
1648        else
1649                direction = DMA_FROM_DEVICE;
1650
1651        ret = fw_iso_buffer_init(&client->buffer, client->device->card,
1652                                 page_count, direction);
1653        if (ret < 0)
1654                return ret;
1655
1656        ret = fw_iso_buffer_map(&client->buffer, vma);
1657        if (ret < 0)
1658                fw_iso_buffer_destroy(&client->buffer, client->device->card);
1659
1660        return ret;
1661}
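/*
 * Example (user space, illustrative sketch, not part of the driver): mapping
 * the isochronous DMA buffer set up by the handler above.  The mapping must
 * be MAP_SHARED and a whole number of pages; a writable mapping selects
 * DMA_TO_DEVICE (transmit), a read-only one DMA_FROM_DEVICE.
 * map_iso_buffer() is a hypothetical helper name.
 */
#if 0
#include <sys/mman.h>

static void *map_iso_buffer(int fd, size_t size)
{
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);

        return p == MAP_FAILED ? NULL : p;
}
#endif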
1662
1663static int is_outbound_transaction_resource(int id, void *p, void *data)
1664{
1665        struct client_resource *resource = p;
1666
1667        return resource->release == release_transaction;
1668}
1669
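/*
 * idr_for_each() stops as soon as a callback returns nonzero and returns
 * that value, so the call below acts as an "is any outbound transaction
 * still pending?" predicate over the client's resource table.
 */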
1670static int has_outbound_transactions(struct client *client)
1671{
1672        int ret;
1673
1674        spin_lock_irq(&client->lock);
1675        ret = idr_for_each(&client->resource_idr,
1676                           is_outbound_transaction_resource, NULL);
1677        spin_unlock_irq(&client->lock);
1678
1679        return ret;
1680}
1681
1682static int shutdown_resource(int id, void *p, void *data)
1683{
1684        struct client_resource *resource = p;
1685        struct client *client = data;
1686
1687        resource->release(client, resource);
1688        client_put(client);
1689
1690        return 0;
1691}
1692
1693static int fw_device_op_release(struct inode *inode, struct file *file)
1694{
1695        struct client *client = file->private_data;
1696        struct event *event, *next_event;
1697
1698        spin_lock_irq(&client->device->card->lock);
1699        list_del(&client->phy_receiver_link);
1700        spin_unlock_irq(&client->device->card->lock);
1701
1702        mutex_lock(&client->device->client_list_mutex);
1703        list_del(&client->link);
1704        mutex_unlock(&client->device->client_list_mutex);
1705
1706        if (client->iso_context)
1707                fw_iso_context_destroy(client->iso_context);
1708
1709        if (client->buffer.pages)
1710                fw_iso_buffer_destroy(&client->buffer, client->device->card);
1711
1712        /* Freeze client->resource_idr and client->event_list */
1713        spin_lock_irq(&client->lock);
1714        client->in_shutdown = true;
1715        spin_unlock_irq(&client->lock);
1716
1717        wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
1718
1719        idr_for_each(&client->resource_idr, shutdown_resource, client);
1720        idr_remove_all(&client->resource_idr);
1721        idr_destroy(&client->resource_idr);
1722
1723        list_for_each_entry_safe(event, next_event, &client->event_list, link)
1724                kfree(event);
1725
1726        client_put(client);
1727
1728        return 0;
1729}
1730
1731static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
1732{
1733        struct client *client = file->private_data;
1734        unsigned int mask = 0;
1735
1736        poll_wait(file, &client->wait, pt);
1737
1738        if (fw_device_is_shutdown(client->device))
1739                mask |= POLLHUP | POLLERR;
1740        if (!list_empty(&client->event_list))
1741                mask |= POLLIN | POLLRDNORM;
1742
1743        return mask;
1744}
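/*
 * Example (user space, illustrative sketch, not part of the driver): waiting
 * for the next event.  POLLIN means read() will return an event;
 * POLLHUP|POLLERR mean the device went away.  wait_for_event() is a
 * hypothetical helper name.
 */
#if 0
#include <poll.h>

static int wait_for_event(int fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        if (poll(&pfd, 1, timeout_ms) <= 0)
                return -1;              /* error or timeout */
        if (pfd.revents & (POLLHUP | POLLERR))
                return -1;              /* device shut down */
        return 0;                       /* an event is readable */
}
#endif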
1745
1746const struct file_operations fw_device_ops = {
1747        .owner          = THIS_MODULE,
1748        .llseek         = no_llseek,
1749        .open           = fw_device_op_open,
1750        .read           = fw_device_op_read,
1751        .unlocked_ioctl = fw_device_op_ioctl,
1752        .mmap           = fw_device_op_mmap,
1753        .release        = fw_device_op_release,
1754        .poll           = fw_device_op_poll,
1755#ifdef CONFIG_COMPAT
1756        .compat_ioctl   = fw_device_op_compat_ioctl,
1757#endif
1758};
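/*
 * Example (user space, illustrative sketch, not part of the driver): a
 * minimal session with this character device, exercising open() and the
 * GET_INFO handshake that negotiates the ABI version.  "/dev/fw0" is an
 * assumed device node name.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
        /* 4 = highest ABI version this client implements */
        struct fw_cdev_get_info info = { .version = 4 };
        int fd = open("/dev/fw0", O_RDWR);

        if (fd < 0)
                return 1;
        if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
                return 1;
        /* info.version now holds the driver's FW_CDEV_KERNEL_VERSION */
        return 0;
}
#endif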
1759