/* linux/drivers/firewire/core-card.c */
   1/*
   2 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software Foundation,
  16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17 */
  18
  19#include <linux/bug.h>
  20#include <linux/completion.h>
  21#include <linux/crc-itu-t.h>
  22#include <linux/device.h>
  23#include <linux/errno.h>
  24#include <linux/firewire.h>
  25#include <linux/firewire-constants.h>
  26#include <linux/jiffies.h>
  27#include <linux/kernel.h>
  28#include <linux/kref.h>
  29#include <linux/list.h>
  30#include <linux/module.h>
  31#include <linux/mutex.h>
  32#include <linux/spinlock.h>
  33#include <linux/workqueue.h>
  34
  35#include <asm/atomic.h>
  36#include <asm/byteorder.h>
  37
  38#include "core.h"
  39
  40int fw_compute_block_crc(__be32 *block)
  41{
  42        int length;
  43        u16 crc;
  44
  45        length = (be32_to_cpu(block[0]) >> 16) & 0xff;
  46        crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
  47        *block |= cpu_to_be32(crc);
  48
  49        return length;
  50}
  51
/* Serializes card_list, descriptor_list and all config ROM state below. */
static DEFINE_MUTEX(card_mutex);
/* All registered cards; config ROM updates are pushed to each of them. */
static LIST_HEAD(card_list);

/* Extra config ROM blocks registered via fw_core_add_descriptor(). */
static LIST_HEAD(descriptor_list);
/* Root directory entries contributed by the descriptors above. */
static int descriptor_count;

/* Scratch buffer for config ROM generation; protected by card_mutex. */
static __be32 tmp_config_rom[256];
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
static size_t config_rom_length = 1 + 4 + 1 + 1;
  61
/* Field helpers for quadlets 0 and 2 of the config ROM bus info block. */
#define BIB_CRC(v)              ((v) <<  0)
#define BIB_CRC_LENGTH(v)       ((v) << 16)
#define BIB_INFO_LENGTH(v)      ((v) << 24)
#define BIB_BUS_NAME            0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v)       ((v) <<  0)
#define BIB_GENERATION(v)       ((v) <<  4)
#define BIB_MAX_ROM(v)          ((v) <<  8)
#define BIB_MAX_RECEIVE(v)      ((v) << 12)
#define BIB_CYC_CLK_ACC(v)      ((v) << 16)
#define BIB_PMC                 ((1) << 27) /* power manager capable */
#define BIB_BMC                 ((1) << 28) /* bus manager capable */
#define BIB_ISC                 ((1) << 29) /* isochronous capable */
#define BIB_CMC                 ((1) << 30) /* cycle master capable */
#define BIB_IRMC                ((1) << 31) /* iso resource manager capable */
#define NODE_CAPABILITIES       0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */

/* OUI used to detect Canon camcorders; see the IRM quirk in bm_work(). */
#define CANON_OUI               0x000085
  79
/*
 * Build a complete config ROM image (bus info block, root directory and
 * all registered descriptor blocks) into config_rom and compute the CRC
 * of every block.  Caller must hold card_mutex, which protects
 * descriptor_list, descriptor_count and config_rom_length.
 */
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
        struct fw_descriptor *desc;
        int i, j, k, length;

        /*
         * Initialize contents of config rom buffer.  On the OHCI
         * controller, block reads to the config rom accesses the host
         * memory, but quadlet read access the hardware bus info block
         * registers.  That's just crack, but it means we should make
         * sure the contents of bus info block in host memory matches
         * the version stored in the OHCI registers.
         */

        config_rom[0] = cpu_to_be32(
                BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
        config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
        /* ROM generation cycles through 2..15; 0 and 1 are reserved values. */
        config_rom[2] = cpu_to_be32(
                BIB_LINK_SPEED(card->link_speed) |
                BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
                BIB_MAX_ROM(2) |
                BIB_MAX_RECEIVE(card->max_receive) |
                BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
        config_rom[3] = cpu_to_be32(card->guid >> 32);
        config_rom[4] = cpu_to_be32(card->guid);

        /* Generate root directory. */
        config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
        i = 7;                          /* next root directory entry */
        j = 7 + descriptor_count;       /* where descriptor data blocks start */

        /* Generate root directory entries for descriptors. */
        list_for_each_entry (desc, &descriptor_list, link) {
                if (desc->immediate > 0)
                        config_rom[i++] = cpu_to_be32(desc->immediate);
                /* Directory entry: key plus offset (in quadlets) to the data. */
                config_rom[i] = cpu_to_be32(desc->key | (j - i));
                i++;
                j += desc->length;
        }

        /* Update root directory length. */
        config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);

        /* End of root directory, now copy in descriptors. */
        list_for_each_entry (desc, &descriptor_list, link) {
                for (k = 0; k < desc->length; k++)
                        config_rom[i + k] = cpu_to_be32(desc->data[k]);
                i += desc->length;
        }

        /* Calculate CRCs for all blocks in the config rom.  This
         * assumes that CRC length and info length are identical for
         * the bus info block, which is always the case for this
         * implementation. */
        for (i = 0; i < j; i += length + 1)
                length = fw_compute_block_crc(config_rom + i);

        WARN_ON(j != config_rom_length);
}
 139
/*
 * Regenerate the config ROM image and hand it to every registered card.
 * Caller must hold card_mutex (protects card_list and tmp_config_rom).
 */
static void update_config_roms(void)
{
        struct fw_card *card;

        list_for_each_entry (card, &card_list, link) {
                generate_config_rom(card, tmp_config_rom);
                card->driver->set_config_rom(card, tmp_config_rom,
                                             config_rom_length);
        }
}
 150
 151static size_t required_space(struct fw_descriptor *desc)
 152{
 153        /* descriptor + entry into root dir + optional immediate entry */
 154        return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
 155}
 156
 157int fw_core_add_descriptor(struct fw_descriptor *desc)
 158{
 159        size_t i;
 160        int ret;
 161
 162        /*
 163         * Check descriptor is valid; the length of all blocks in the
 164         * descriptor has to add up to exactly the length of the
 165         * block.
 166         */
 167        i = 0;
 168        while (i < desc->length)
 169                i += (desc->data[i] >> 16) + 1;
 170
 171        if (i != desc->length)
 172                return -EINVAL;
 173
 174        mutex_lock(&card_mutex);
 175
 176        if (config_rom_length + required_space(desc) > 256) {
 177                ret = -EBUSY;
 178        } else {
 179                list_add_tail(&desc->link, &descriptor_list);
 180                config_rom_length += required_space(desc);
 181                descriptor_count++;
 182                if (desc->immediate > 0)
 183                        descriptor_count++;
 184                update_config_roms();
 185                ret = 0;
 186        }
 187
 188        mutex_unlock(&card_mutex);
 189
 190        return ret;
 191}
 192EXPORT_SYMBOL(fw_core_add_descriptor);
 193
 194void fw_core_remove_descriptor(struct fw_descriptor *desc)
 195{
 196        mutex_lock(&card_mutex);
 197
 198        list_del(&desc->link);
 199        config_rom_length -= required_space(desc);
 200        descriptor_count--;
 201        if (desc->immediate > 0)
 202                descriptor_count--;
 203        update_config_roms();
 204
 205        mutex_unlock(&card_mutex);
 206}
 207EXPORT_SYMBOL(fw_core_remove_descriptor);
 208
 209static int reset_bus(struct fw_card *card, bool short_reset)
 210{
 211        int reg = short_reset ? 5 : 1;
 212        int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
 213
 214        return card->driver->update_phy_reg(card, reg, 0, bit);
 215}
 216
 217void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
 218{
 219        /* We don't try hard to sort out requests of long vs. short resets. */
 220        card->br_short = short_reset;
 221
 222        /* Use an arbitrary short delay to combine multiple reset requests. */
 223        fw_card_get(card);
 224        if (!schedule_delayed_work(&card->br_work,
 225                                   delayed ? DIV_ROUND_UP(HZ, 100) : 0))
 226                fw_card_put(card);
 227}
 228EXPORT_SYMBOL(fw_schedule_bus_reset);
 229
 230static void br_work(struct work_struct *work)
 231{
 232        struct fw_card *card = container_of(work, struct fw_card, br_work.work);
 233
 234        /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
 235        if (card->reset_jiffies != 0 &&
 236            time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) {
 237                if (!schedule_delayed_work(&card->br_work, 2 * HZ))
 238                        fw_card_put(card);
 239                return;
 240        }
 241
 242        fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
 243                           FW_PHY_CONFIG_CURRENT_GAP_COUNT);
 244        reset_bus(card, card->br_short);
 245        fw_card_put(card);
 246}
 247
 248static void allocate_broadcast_channel(struct fw_card *card, int generation)
 249{
 250        int channel, bandwidth = 0;
 251
 252        if (!card->broadcast_channel_allocated) {
 253                fw_iso_resource_manage(card, generation, 1ULL << 31,
 254                                       &channel, &bandwidth, true,
 255                                       card->bm_transaction_data);
 256                if (channel != 31) {
 257                        fw_notify("failed to allocate broadcast channel\n");
 258                        return;
 259                }
 260                card->broadcast_channel_allocated = true;
 261        }
 262
 263        device_for_each_child(card->device, (void *)(long)generation,
 264                              fw_device_set_broadcast_channel);
 265}
 266
/*
 * Gap counts for 0..15 hops, per IEEE 1394a table E-1 (indexed by the
 * bus's maximum hop count; 63 is the power-on default / fallback).
 */
static const char gap_count_table[] = {
        63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
 270
 271void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
 272{
 273        fw_card_get(card);
 274        if (!schedule_delayed_work(&card->bm_work, delay))
 275                fw_card_put(card);
 276}
 277
/*
 * Bus manager work, run after each bus reset: contend for the bus
 * manager role via a compare-swap lock on the IRM's BUS_MANAGER_ID
 * register, verify that a capable cycle master is root, optimize the
 * gap count, and allocate the broadcast channel if we are IRM.  May
 * trigger another bus reset.  Drops the card reference taken by
 * fw_schedule_bm_work() when done.
 */
static void bm_work(struct work_struct *work)
{
        struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
        struct fw_device *root_device, *irm_device;
        struct fw_node *root_node;
        int root_id, new_root_id, irm_id, bm_id, local_id;
        int gap_count, generation, grace, rcode;
        bool do_reset = false;
        bool root_device_is_running;
        bool root_device_is_cmc;
        bool irm_is_1394_1995_only;
        bool keep_this_irm;

        spin_lock_irq(&card->lock);

        /* The card may have been shut down (topology gone) meanwhile. */
        if (card->local_node == NULL) {
                spin_unlock_irq(&card->lock);
                goto out_put_card;
        }

        generation = card->generation;

        root_node = card->root_node;
        fw_node_get(root_node);
        root_device = root_node->data;
        root_device_is_running = root_device &&
                        atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
        root_device_is_cmc = root_device && root_device->cmc;

        irm_device = card->irm_node->data;
        /* A 1995-only IRM has no 1394a extended capability bits set. */
        irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
                        (irm_device->config_rom[2] & 0x000000f0) == 0;

        /* Canon MV5i works unreliably if it is not root node. */
        keep_this_irm = irm_device && irm_device->config_rom &&
                        irm_device->config_rom[3] >> 8 == CANON_OUI;

        root_id  = root_node->node_id;
        irm_id   = card->irm_node->node_id;
        local_id = card->local_node->node_id;

        /* Grace period: wait 125ms after a reset before contending for BM. */
        grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

        if ((is_next_generation(generation, card->bm_generation) &&
             !card->bm_abdicate) ||
            (card->bm_generation != generation && grace)) {
                /*
                 * This first step is to figure out who is IRM and
                 * then try to become bus manager.  If the IRM is not
                 * well defined (e.g. does not have an active link
                 * layer or does not respond to our lock request), we
                 * will have to do a little vigilante bus management.
                 * In that case, we do a goto into the gap count logic
                 * so that when we do the reset, we still optimize the
                 * gap count.  That could well save a reset in the
                 * next generation.
                 */

                if (!card->irm_node->link_on) {
                        new_root_id = local_id;
                        fw_notify("%s, making local node (%02x) root.\n",
                                  "IRM has link off", new_root_id);
                        goto pick_me;
                }

                if (irm_is_1394_1995_only && !keep_this_irm) {
                        new_root_id = local_id;
                        fw_notify("%s, making local node (%02x) root.\n",
                                  "IRM is not 1394a compliant", new_root_id);
                        goto pick_me;
                }

                /* Compare-swap: expect "no BM" (0x3f), propose our node ID. */
                card->bm_transaction_data[0] = cpu_to_be32(0x3f);
                card->bm_transaction_data[1] = cpu_to_be32(local_id);

                spin_unlock_irq(&card->lock);

                rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                irm_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
                                card->bm_transaction_data, 8);

                if (rcode == RCODE_GENERATION)
                        /* Another bus reset, BM work has been rescheduled. */
                        goto out;

                bm_id = be32_to_cpu(card->bm_transaction_data[0]);

                spin_lock_irq(&card->lock);
                if (rcode == RCODE_COMPLETE && generation == card->generation)
                        card->bm_node_id =
                            bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
                spin_unlock_irq(&card->lock);

                if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
                        /* Somebody else is BM.  Only act as IRM. */
                        if (local_id == irm_id)
                                allocate_broadcast_channel(card, generation);

                        goto out;
                }

                if (rcode == RCODE_SEND_ERROR) {
                        /*
                         * We have been unable to send the lock request due to
                         * some local problem.  Let's try again later and hope
                         * that the problem has gone away by then.
                         */
                        fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
                        goto out;
                }

                spin_lock_irq(&card->lock);

                if (rcode != RCODE_COMPLETE && !keep_this_irm) {
                        /*
                         * The lock request failed, maybe the IRM
                         * isn't really IRM capable after all. Let's
                         * do a bus reset and pick the local node as
                         * root, and thus, IRM.
                         */
                        new_root_id = local_id;
                        fw_notify("%s, making local node (%02x) root.\n",
                                  "BM lock failed", new_root_id);
                        goto pick_me;
                }
        } else if (card->bm_generation != generation) {
                /*
                 * We weren't BM in the last generation, and the last
                 * bus reset is less than 125ms ago.  Reschedule this job.
                 */
                spin_unlock_irq(&card->lock);
                fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
                goto out;
        }

        /*
         * We're bus manager for this generation, so next step is to
         * make sure we have an active cycle master and do gap count
         * optimization.
         */
        card->bm_generation = generation;

        if (root_device == NULL) {
                /*
                 * Either link_on is false, or we failed to read the
                 * config rom.  In either case, pick another root.
                 */
                new_root_id = local_id;
        } else if (!root_device_is_running) {
                /*
                 * If we haven't probed this device yet, bail out now
                 * and let's try again once that's done.
                 */
                spin_unlock_irq(&card->lock);
                goto out;
        } else if (root_device_is_cmc) {
                /*
                 * We will send out a force root packet for this
                 * node as part of the gap count optimization.
                 */
                new_root_id = root_id;
        } else {
                /*
                 * Current root has an active link layer and we
                 * successfully read the config rom, but it's not
                 * cycle master capable.
                 */
                new_root_id = local_id;
        }

 pick_me:
        /*
         * Pick a gap count from 1394a table E-1.  The table doesn't cover
         * the typically much larger 1394b beta repeater delays though.
         */
        if (!card->beta_repeaters_present &&
            root_node->max_hops < ARRAY_SIZE(gap_count_table))
                gap_count = gap_count_table[root_node->max_hops];
        else
                gap_count = 63;

        /*
         * Finally, figure out if we should do a reset or not.  If we have
         * done less than 5 resets with the same physical topology and we
         * have either a new root or a new gap count setting, let's do it.
         */

        if (card->bm_retries++ < 5 &&
            (card->gap_count != gap_count || new_root_id != root_id))
                do_reset = true;

        spin_unlock_irq(&card->lock);

        if (do_reset) {
                fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
                          card->index, new_root_id, gap_count);
                fw_send_phy_config(card, new_root_id, generation, gap_count);
                reset_bus(card, true);
                /* Will allocate broadcast channel after the reset. */
                goto out;
        }

        if (root_device_is_cmc) {
                /*
                 * Make sure that the cycle master sends cycle start packets.
                 */
                card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
                rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
                                root_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_STATE_SET,
                                card->bm_transaction_data, 4);
                if (rcode == RCODE_GENERATION)
                        goto out;
        }

        if (local_id == irm_id)
                allocate_broadcast_channel(card, generation);

 out:
        fw_node_put(root_node);
 out_put_card:
        fw_card_put(card);
}
 502
 503void fw_card_initialize(struct fw_card *card,
 504                        const struct fw_card_driver *driver,
 505                        struct device *device)
 506{
 507        static atomic_t index = ATOMIC_INIT(-1);
 508
 509        card->index = atomic_inc_return(&index);
 510        card->driver = driver;
 511        card->device = device;
 512        card->current_tlabel = 0;
 513        card->tlabel_mask = 0;
 514        card->split_timeout_hi = 0;
 515        card->split_timeout_lo = 800 << 19;
 516        card->split_timeout_cycles = 800;
 517        card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10);
 518        card->color = 0;
 519        card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
 520
 521        kref_init(&card->kref);
 522        init_completion(&card->done);
 523        INIT_LIST_HEAD(&card->transaction_list);
 524        INIT_LIST_HEAD(&card->phy_receiver_list);
 525        spin_lock_init(&card->lock);
 526
 527        card->local_node = NULL;
 528
 529        INIT_DELAYED_WORK(&card->br_work, br_work);
 530        INIT_DELAYED_WORK(&card->bm_work, bm_work);
 531}
 532EXPORT_SYMBOL(fw_card_initialize);
 533
 534int fw_card_add(struct fw_card *card,
 535                u32 max_receive, u32 link_speed, u64 guid)
 536{
 537        int ret;
 538
 539        card->max_receive = max_receive;
 540        card->link_speed = link_speed;
 541        card->guid = guid;
 542
 543        mutex_lock(&card_mutex);
 544
 545        generate_config_rom(card, tmp_config_rom);
 546        ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
 547        if (ret == 0)
 548                list_add_tail(&card->link, &card_list);
 549
 550        mutex_unlock(&card_mutex);
 551
 552        return ret;
 553}
 554EXPORT_SYMBOL(fw_card_add);
 555
 556/*
 557 * The next few functions implement a dummy driver that is used once a card
 558 * driver shuts down an fw_card.  This allows the driver to cleanly unload,
 559 * as all IO to the card will be handled (and failed) by the dummy driver
 560 * instead of calling into the module.  Only functions for iso context
 561 * shutdown still need to be provided by the card driver.
 562 *
 563 * .read/write_csr() should never be called anymore after the dummy driver
 564 * was bound since they are only used within request handler context.
 565 * .set_config_rom() is never called since the card is taken out of card_list
 566 * before switching to the dummy driver.
 567 */
 568
/* PHY register reads always fail once the real driver is gone. */
static int dummy_read_phy_reg(struct fw_card *card, int address)
{
        return -ENODEV;
}
 573
/* PHY register updates always fail once the real driver is gone. */
static int dummy_update_phy_reg(struct fw_card *card, int address,
                                int clear_bits, int set_bits)
{
        return -ENODEV;
}
 579
/* Complete outbound requests immediately with RCODE_CANCELLED. */
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
        packet->callback(packet, card, RCODE_CANCELLED);
}
 584
/* Complete outbound responses immediately with RCODE_CANCELLED. */
static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
        packet->callback(packet, card, RCODE_CANCELLED);
}
 589
/* Nothing is ever in flight on the dummy driver, so nothing to cancel. */
static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
        return -ENOENT;
}
 594
/* Physical DMA cannot be enabled once the real driver is gone. */
static int dummy_enable_phys_dma(struct fw_card *card,
                                 int node_id, int generation)
{
        return -ENODEV;
}
 600
/* No new iso contexts can be created once the real driver is gone. */
static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
                                int type, int channel, size_t header_size)
{
        return ERR_PTR(-ENODEV);
}
 606
/* Existing iso contexts cannot be (re)started on the dummy driver. */
static int dummy_start_iso(struct fw_iso_context *ctx,
                           s32 cycle, u32 sync, u32 tags)
{
        return -ENODEV;
}
 612
/* Iso channel masks cannot be changed on the dummy driver. */
static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
        return -ENODEV;
}
 617
/* No more iso packets can be queued on the dummy driver. */
static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
                           struct fw_iso_buffer *buffer, unsigned long payload)
{
        return -ENODEV;
}
 623
/*
 * Template for the per-card dummy driver installed by
 * fw_core_remove_card(); free_iso_context and stop_iso are copied from
 * the real driver at removal time since iso shutdown must still work.
 */
static const struct fw_card_driver dummy_driver_template = {
        .read_phy_reg           = dummy_read_phy_reg,
        .update_phy_reg         = dummy_update_phy_reg,
        .send_request           = dummy_send_request,
        .send_response          = dummy_send_response,
        .cancel_packet          = dummy_cancel_packet,
        .enable_phys_dma        = dummy_enable_phys_dma,
        .allocate_iso_context   = dummy_allocate_iso_context,
        .start_iso              = dummy_start_iso,
        .set_iso_channels       = dummy_set_iso_channels,
        .queue_iso              = dummy_queue_iso,
};
 636
/*
 * kref release callback: wakes up fw_core_remove_card(), which waits on
 * card->done until the last reference to the card has been dropped.
 */
void fw_card_release(struct kref *kref)
{
        struct fw_card *card = container_of(kref, struct fw_card, kref);

        complete(&card->done);
}
 643
/*
 * Shut a card down: take the PHY off the bus, remove the card from
 * card_list, swap in the dummy driver so pending IO fails cleanly,
 * destroy the node tree, and wait until the last card reference is
 * dropped.  The caller may free the card after this returns.
 */
void fw_core_remove_card(struct fw_card *card)
{
        struct fw_card_driver dummy_driver = dummy_driver_template;

        /* Clear link-active and contender bits, then force a short reset. */
        card->driver->update_phy_reg(card, 4,
                                     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
        fw_schedule_bus_reset(card, false, true);

        mutex_lock(&card_mutex);
        list_del_init(&card->link);
        mutex_unlock(&card_mutex);

        /* Switch off most of the card driver interface. */
        dummy_driver.free_iso_context   = card->driver->free_iso_context;
        dummy_driver.stop_iso           = card->driver->stop_iso;
        card->driver = &dummy_driver;

        fw_destroy_nodes(card);

        /* Wait for all users, especially device workqueue jobs, to finish. */
        fw_card_put(card);
        wait_for_completion(&card->done);

        WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
 670