linux/drivers/net/wireless/b43legacy/dma.c
   1/*
   2
   3  Broadcom B43legacy wireless driver
   4
   5  DMA ringbuffer and descriptor allocation/management
   6
   7  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
   8
   9  Some code in this file is derived from the b44.c driver
  10  Copyright (C) 2002 David S. Miller
  11  Copyright (C) Pekka Pietikainen
  12
  13  This program is free software; you can redistribute it and/or modify
  14  it under the terms of the GNU General Public License as published by
  15  the Free Software Foundation; either version 2 of the License, or
  16  (at your option) any later version.
  17
  18  This program is distributed in the hope that it will be useful,
  19  but WITHOUT ANY WARRANTY; without even the implied warranty of
  20  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  21  GNU General Public License for more details.
  22
  23  You should have received a copy of the GNU General Public License
  24  along with this program; see the file COPYING.  If not, write to
   25  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  26  Boston, MA 02110-1301, USA.
  27
  28*/
  29
  30#include "b43legacy.h"
  31#include "dma.h"
  32#include "main.h"
  33#include "debugfs.h"
  34#include "xmit.h"
  35
  36#include <linux/dma-mapping.h>
  37#include <linux/pci.h>
  38#include <linux/delay.h>
  39#include <linux/skbuff.h>
  40#include <linux/slab.h>
  41#include <net/dst.h>
  42
  43/* 32bit DMA ops. */
  44static
  45struct b43legacy_dmadesc_generic *op32_idx2desc(
  46                                        struct b43legacy_dmaring *ring,
  47                                        int slot,
  48                                        struct b43legacy_dmadesc_meta **meta)
  49{
  50        struct b43legacy_dmadesc32 *desc;
  51
  52        *meta = &(ring->meta[slot]);
  53        desc = ring->descbase;
  54        desc = &(desc[slot]);
  55
  56        return (struct b43legacy_dmadesc_generic *)desc;
  57}
  58
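/*
 * op32_fill_descriptor() builds one 32-bit hardware descriptor: the low
 * address bits (with the SSB translation bits OR'd in) form the address
 * word, while the high "addrext" bits, the byte count (bufsize minus the
 * ring's frameoffset) and the FRAMESTART/FRAMEEND/DTABLEEND/IRQ flags are
 * packed into the control word. Both words are stored little-endian, as
 * the descriptor ring is read directly from host memory by the DMA core.
 */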
  59static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
  60                                 struct b43legacy_dmadesc_generic *desc,
  61                                 dma_addr_t dmaaddr, u16 bufsize,
  62                                 int start, int end, int irq)
  63{
  64        struct b43legacy_dmadesc32 *descbase = ring->descbase;
  65        int slot;
  66        u32 ctl;
  67        u32 addr;
  68        u32 addrext;
  69
  70        slot = (int)(&(desc->dma32) - descbase);
  71        B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
  72
  73        addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
  74        addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
  75                   >> SSB_DMA_TRANSLATION_SHIFT;
  76        addr |= ssb_dma_translation(ring->dev->dev);
  77        ctl = (bufsize - ring->frameoffset)
  78              & B43legacy_DMA32_DCTL_BYTECNT;
  79        if (slot == ring->nr_slots - 1)
  80                ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
  81        if (start)
  82                ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
  83        if (end)
  84                ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
  85        if (irq)
  86                ctl |= B43legacy_DMA32_DCTL_IRQ;
  87        ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
  88               & B43legacy_DMA32_DCTL_ADDREXT_MASK;
  89
  90        desc->dma32.control = cpu_to_le32(ctl);
  91        desc->dma32.address = cpu_to_le32(addr);
  92}
  93
  94static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
  95{
  96        b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
  97                            (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
  98}
  99
 100static void op32_tx_suspend(struct b43legacy_dmaring *ring)
 101{
 102        b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
 103                            b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
 104                            | B43legacy_DMA32_TXSUSPEND);
 105}
 106
 107static void op32_tx_resume(struct b43legacy_dmaring *ring)
 108{
 109        b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
 110                            b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
 111                            & ~B43legacy_DMA32_TXSUSPEND);
 112}
 113
 114static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
 115{
 116        u32 val;
 117
 118        val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
 119        val &= B43legacy_DMA32_RXDPTR;
 120
 121        return (val / sizeof(struct b43legacy_dmadesc32));
 122}
 123
 124static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
 125                                    int slot)
 126{
 127        b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
 128                            (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
 129}
 130
 131static const struct b43legacy_dma_ops dma32_ops = {
 132        .idx2desc               = op32_idx2desc,
 133        .fill_descriptor        = op32_fill_descriptor,
 134        .poke_tx                = op32_poke_tx,
 135        .tx_suspend             = op32_tx_suspend,
 136        .tx_resume              = op32_tx_resume,
 137        .get_current_rxslot     = op32_get_current_rxslot,
 138        .set_current_rxslot     = op32_set_current_rxslot,
 139};
 140
 141/* 64bit DMA ops. */
 142static
 143struct b43legacy_dmadesc_generic *op64_idx2desc(
 144                                        struct b43legacy_dmaring *ring,
 145                                        int slot,
 146                                        struct b43legacy_dmadesc_meta
 147                                        **meta)
 148{
 149        struct b43legacy_dmadesc64 *desc;
 150
 151        *meta = &(ring->meta[slot]);
 152        desc = ring->descbase;
 153        desc = &(desc[slot]);
 154
 155        return (struct b43legacy_dmadesc_generic *)desc;
 156}
 157
 158static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
 159                                 struct b43legacy_dmadesc_generic *desc,
 160                                 dma_addr_t dmaaddr, u16 bufsize,
 161                                 int start, int end, int irq)
 162{
 163        struct b43legacy_dmadesc64 *descbase = ring->descbase;
 164        int slot;
 165        u32 ctl0 = 0;
 166        u32 ctl1 = 0;
 167        u32 addrlo;
 168        u32 addrhi;
 169        u32 addrext;
 170
 171        slot = (int)(&(desc->dma64) - descbase);
 172        B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
 173
 174        addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
 175        addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
 176        addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
 177                  >> SSB_DMA_TRANSLATION_SHIFT;
 178        addrhi |= ssb_dma_translation(ring->dev->dev);
 179        if (slot == ring->nr_slots - 1)
 180                ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
 181        if (start)
 182                ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART;
 183        if (end)
 184                ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND;
 185        if (irq)
 186                ctl0 |= B43legacy_DMA64_DCTL0_IRQ;
 187        ctl1 |= (bufsize - ring->frameoffset)
 188                & B43legacy_DMA64_DCTL1_BYTECNT;
 189        ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT)
 190                & B43legacy_DMA64_DCTL1_ADDREXT_MASK;
 191
 192        desc->dma64.control0 = cpu_to_le32(ctl0);
 193        desc->dma64.control1 = cpu_to_le32(ctl1);
 194        desc->dma64.address_low = cpu_to_le32(addrlo);
 195        desc->dma64.address_high = cpu_to_le32(addrhi);
 196}
 197
 198static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot)
 199{
 200        b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX,
 201                            (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
 202}
 203
 204static void op64_tx_suspend(struct b43legacy_dmaring *ring)
 205{
 206        b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
 207                            b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
 208                            | B43legacy_DMA64_TXSUSPEND);
 209}
 210
 211static void op64_tx_resume(struct b43legacy_dmaring *ring)
 212{
 213        b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
 214                            b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
 215                            & ~B43legacy_DMA64_TXSUSPEND);
 216}
 217
 218static int op64_get_current_rxslot(struct b43legacy_dmaring *ring)
 219{
 220        u32 val;
 221
 222        val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS);
 223        val &= B43legacy_DMA64_RXSTATDPTR;
 224
 225        return (val / sizeof(struct b43legacy_dmadesc64));
 226}
 227
 228static void op64_set_current_rxslot(struct b43legacy_dmaring *ring,
 229                                    int slot)
 230{
 231        b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
 232                            (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
 233}
 234
 235static const struct b43legacy_dma_ops dma64_ops = {
 236        .idx2desc               = op64_idx2desc,
 237        .fill_descriptor        = op64_fill_descriptor,
 238        .poke_tx                = op64_poke_tx,
 239        .tx_suspend             = op64_tx_suspend,
 240        .tx_resume              = op64_tx_resume,
 241        .get_current_rxslot     = op64_get_current_rxslot,
 242        .set_current_rxslot     = op64_set_current_rxslot,
 243};
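/*
 * Everything below this point is engine-independent ring management. It
 * never touches the descriptor layout directly, but goes through the
 * dma32_ops/dma64_ops tables above, selected once per ring in
 * b43legacy_setup_dmaring().
 */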
 244
 245
 246static inline int free_slots(struct b43legacy_dmaring *ring)
 247{
 248        return (ring->nr_slots - ring->used_slots);
 249}
 250
 251static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
 252{
 253        B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
 254        if (slot == ring->nr_slots - 1)
 255                return 0;
 256        return slot + 1;
 257}
 258
 259static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
 260{
 261        B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
 262        if (slot == 0)
 263                return ring->nr_slots - 1;
 264        return slot - 1;
 265}
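/*
 * next_slot()/prev_slot() wrap around the ring, so slot indices always
 * stay within [0, nr_slots - 1]. next_slot() additionally accepts -1,
 * the "nothing queued yet" value that current_slot is initialized to
 * for TX rings.
 */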
 266
 267#ifdef CONFIG_B43LEGACY_DEBUG
 268static void update_max_used_slots(struct b43legacy_dmaring *ring,
 269                                  int current_used_slots)
 270{
 271        if (current_used_slots <= ring->max_used_slots)
 272                return;
 273        ring->max_used_slots = current_used_slots;
 274        if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
 275                b43legacydbg(ring->dev->wl,
 276                       "max_used_slots increased to %d on %s ring %d\n",
 277                       ring->max_used_slots,
 278                       ring->tx ? "TX" : "RX",
 279                       ring->index);
 280}
 281#else
 282static inline
 283void update_max_used_slots(struct b43legacy_dmaring *ring,
 284                           int current_used_slots)
 285{ }
 286#endif /* DEBUG */
 287
 288/* Request a slot for usage. */
 289static inline
 290int request_slot(struct b43legacy_dmaring *ring)
 291{
 292        int slot;
 293
 294        B43legacy_WARN_ON(!ring->tx);
 295        B43legacy_WARN_ON(ring->stopped);
 296        B43legacy_WARN_ON(free_slots(ring) == 0);
 297
 298        slot = next_slot(ring, ring->current_slot);
 299        ring->current_slot = slot;
 300        ring->used_slots++;
 301
 302        update_max_used_slots(ring, ring->used_slots);
 303
 304        return slot;
 305}
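/*
 * request_slot() never fails; callers are expected to check
 * free_slots() and the ring->stopped flag under ring->lock before
 * calling it, as b43legacy_dma_tx() does.
 */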
 306
 307/* Mac80211-queue to b43legacy-ring mapping */
 308static struct b43legacy_dmaring *priority_to_txring(
 309                                                struct b43legacy_wldev *dev,
 310                                                int queue_priority)
 311{
 312        struct b43legacy_dmaring *ring;
 313
 314/*FIXME: For now we always run on TX-ring-1 */
 315return dev->dma.tx_ring1;
 316
 317        /* 0 = highest priority */
 318        switch (queue_priority) {
 319        default:
 320                B43legacy_WARN_ON(1);
 321                /* fallthrough */
 322        case 0:
 323                ring = dev->dma.tx_ring3;
 324                break;
 325        case 1:
 326                ring = dev->dma.tx_ring2;
 327                break;
 328        case 2:
 329                ring = dev->dma.tx_ring1;
 330                break;
 331        case 3:
 332                ring = dev->dma.tx_ring0;
 333                break;
 334        case 4:
 335                ring = dev->dma.tx_ring4;
 336                break;
 337        case 5:
 338                ring = dev->dma.tx_ring5;
 339                break;
 340        }
 341
 342        return ring;
 343}
 344
 345/* Bcm4301-ring to mac80211-queue mapping */
 346static inline int txring_to_priority(struct b43legacy_dmaring *ring)
 347{
 348        static const u8 idx_to_prio[] =
 349                { 3, 2, 1, 0, 4, 5, };
 350
 351/*FIXME: have only one queue, for now */
 352return 0;
 353
 354        return idx_to_prio[ring->index];
 355}
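/*
 * With the FIXME early-returns above, all TX traffic currently flows
 * over tx_ring1 and is reported back to mac80211 as queue 0; the
 * priority mapping tables are kept for when multiple queues are used
 * again.
 */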
 356
 357
 358static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
 359                                        int controller_idx)
 360{
 361        static const u16 map64[] = {
 362                B43legacy_MMIO_DMA64_BASE0,
 363                B43legacy_MMIO_DMA64_BASE1,
 364                B43legacy_MMIO_DMA64_BASE2,
 365                B43legacy_MMIO_DMA64_BASE3,
 366                B43legacy_MMIO_DMA64_BASE4,
 367                B43legacy_MMIO_DMA64_BASE5,
 368        };
 369        static const u16 map32[] = {
 370                B43legacy_MMIO_DMA32_BASE0,
 371                B43legacy_MMIO_DMA32_BASE1,
 372                B43legacy_MMIO_DMA32_BASE2,
 373                B43legacy_MMIO_DMA32_BASE3,
 374                B43legacy_MMIO_DMA32_BASE4,
 375                B43legacy_MMIO_DMA32_BASE5,
 376        };
 377
 378        if (type == B43legacy_DMA_64BIT) {
 379                B43legacy_WARN_ON(!(controller_idx >= 0 &&
 380                                  controller_idx < ARRAY_SIZE(map64)));
 381                return map64[controller_idx];
 382        }
 383        B43legacy_WARN_ON(!(controller_idx >= 0 &&
 384                          controller_idx < ARRAY_SIZE(map32)));
 385        return map32[controller_idx];
 386}
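/*
 * b43legacy_dmacontroller_base() returns the MMIO base of the n-th DMA
 * controller for the given engine type. The per-ring register offsets
 * (TXCTL, RXSTATUS, ...) used throughout this file are relative to this
 * base, as the mmio_base + offset accesses in the reset helpers below
 * show.
 */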
 387
 388static inline
 389dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
 390                          unsigned char *buf,
 391                          size_t len,
 392                          int tx)
 393{
 394        dma_addr_t dmaaddr;
 395
 396        if (tx)
 397                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 398                                             buf, len,
 399                                             DMA_TO_DEVICE);
 400        else
 401                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 402                                             buf, len,
 403                                             DMA_FROM_DEVICE);
 404
 405        return dmaaddr;
 406}
 407
 408static inline
 409void unmap_descbuffer(struct b43legacy_dmaring *ring,
 410                      dma_addr_t addr,
 411                      size_t len,
 412                      int tx)
 413{
 414        if (tx)
 415                dma_unmap_single(ring->dev->dev->dma_dev,
 416                                     addr, len,
 417                                     DMA_TO_DEVICE);
 418        else
 419                dma_unmap_single(ring->dev->dev->dma_dev,
 420                                     addr, len,
 421                                     DMA_FROM_DEVICE);
 422}
 423
 424static inline
 425void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
 426                             dma_addr_t addr,
 427                             size_t len)
 428{
 429        B43legacy_WARN_ON(ring->tx);
 430
 431        dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 432                                addr, len, DMA_FROM_DEVICE);
 433}
 434
 435static inline
 436void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
 437                                dma_addr_t addr,
 438                                size_t len)
 439{
 440        B43legacy_WARN_ON(ring->tx);
 441
 442        dma_sync_single_for_device(ring->dev->dev->dma_dev,
 443                                   addr, len, DMA_FROM_DEVICE);
 444}
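/*
 * The map/unmap/sync helpers take a "tx" flag instead of a raw DMA
 * direction: TX buffers are mapped DMA_TO_DEVICE, RX buffers
 * DMA_FROM_DEVICE. The sync helpers are RX-only, hence the
 * B43legacy_WARN_ON(ring->tx) checks.
 */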
 445
 446static inline
 447void free_descriptor_buffer(struct b43legacy_dmaring *ring,
 448                            struct b43legacy_dmadesc_meta *meta,
 449                            int irq_context)
 450{
 451        if (meta->skb) {
 452                if (irq_context)
 453                        dev_kfree_skb_irq(meta->skb);
 454                else
 455                        dev_kfree_skb(meta->skb);
 456                meta->skb = NULL;
 457        }
 458}
 459
 460static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 461{
 462        /* GFP flags must match the flags in free_ringmemory()! */
 463        ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
 464                                            B43legacy_DMA_RINGMEMSIZE,
 465                                            &(ring->dmabase),
 466                                            GFP_KERNEL);
 467        if (!ring->descbase) {
 468                b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
 469                             " failed\n");
 470                return -ENOMEM;
 471        }
 472        memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);
 473
 474        return 0;
 475}
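/*
 * The ring memory is a single coherent allocation holding the whole
 * descriptor array; ring->descbase is the CPU view used by idx2desc(),
 * and ring->dmabase is the bus address programmed into the controller
 * by dmacontroller_setup().
 */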
 476
 477static void free_ringmemory(struct b43legacy_dmaring *ring)
 478{
 479        dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
 480                          ring->descbase, ring->dmabase);
 481}
 482
 483/* Reset the RX DMA channel */
 484static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
 485                                            u16 mmio_base,
 486                                            enum b43legacy_dmatype type)
 487{
 488        int i;
 489        u32 value;
 490        u16 offset;
 491
 492        might_sleep();
 493
 494        offset = (type == B43legacy_DMA_64BIT) ?
 495                 B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
 496        b43legacy_write32(dev, mmio_base + offset, 0);
 497        for (i = 0; i < 10; i++) {
 498                offset = (type == B43legacy_DMA_64BIT) ?
 499                         B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
 500                value = b43legacy_read32(dev, mmio_base + offset);
 501                if (type == B43legacy_DMA_64BIT) {
 502                        value &= B43legacy_DMA64_RXSTAT;
 503                        if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
 504                                i = -1;
 505                                break;
 506                        }
 507                } else {
 508                        value &= B43legacy_DMA32_RXSTATE;
 509                        if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
 510                                i = -1;
 511                                break;
 512                        }
 513                }
 514                msleep(1);
 515        }
 516        if (i != -1) {
 517                b43legacyerr(dev->wl, "DMA RX reset timed out\n");
 518                return -ENODEV;
 519        }
 520
 521        return 0;
 522}
 523
  524/* Reset the TX DMA channel */
 525static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
 526                                            u16 mmio_base,
 527                                            enum b43legacy_dmatype type)
 528{
 529        int i;
 530        u32 value;
 531        u16 offset;
 532
 533        might_sleep();
 534
 535        for (i = 0; i < 10; i++) {
 536                offset = (type == B43legacy_DMA_64BIT) ?
 537                         B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
 538                value = b43legacy_read32(dev, mmio_base + offset);
 539                if (type == B43legacy_DMA_64BIT) {
 540                        value &= B43legacy_DMA64_TXSTAT;
 541                        if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
 542                            value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
 543                            value == B43legacy_DMA64_TXSTAT_STOPPED)
 544                                break;
 545                } else {
 546                        value &= B43legacy_DMA32_TXSTATE;
 547                        if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
 548                            value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
 549                            value == B43legacy_DMA32_TXSTAT_STOPPED)
 550                                break;
 551                }
 552                msleep(1);
 553        }
 554        offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL :
 555                                                 B43legacy_DMA32_TXCTL;
 556        b43legacy_write32(dev, mmio_base + offset, 0);
 557        for (i = 0; i < 10; i++) {
 558                offset = (type == B43legacy_DMA_64BIT) ?
 559                         B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
 560                value = b43legacy_read32(dev, mmio_base + offset);
 561                if (type == B43legacy_DMA_64BIT) {
 562                        value &= B43legacy_DMA64_TXSTAT;
 563                        if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
 564                                i = -1;
 565                                break;
 566                        }
 567                } else {
 568                        value &= B43legacy_DMA32_TXSTATE;
 569                        if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
 570                                i = -1;
 571                                break;
 572                        }
 573                }
 574                msleep(1);
 575        }
 576        if (i != -1) {
 577                b43legacyerr(dev->wl, "DMA TX reset timed out\n");
 578                return -ENODEV;
 579        }
 580        /* ensure the reset is completed. */
 581        msleep(1);
 582
 583        return 0;
 584}
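/*
 * Both reset helpers use i == -1 as the "state reached" marker for
 * their polling loops: ten 1ms polls give the engine roughly 10ms to
 * report DISABLED before the reset is declared timed out with -ENODEV.
 * The TX variant additionally waits for a drained state
 * (DISABLED/IDLEWAIT/STOPPED) before clearing TXCTL.
 */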
 585
 586/* Check if a DMA mapping address is invalid. */
 587static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
 588                                         dma_addr_t addr,
 589                                         size_t buffersize,
 590                                         bool dma_to_device)
 591{
 592        if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 593                return 1;
 594
 595        switch (ring->type) {
 596        case B43legacy_DMA_30BIT:
 597                if ((u64)addr + buffersize > (1ULL << 30))
 598                        goto address_error;
 599                break;
 600        case B43legacy_DMA_32BIT:
 601                if ((u64)addr + buffersize > (1ULL << 32))
 602                        goto address_error;
 603                break;
 604        case B43legacy_DMA_64BIT:
 605                /* Currently we can't have addresses beyond 64 bits in the kernel. */
 606                break;
 607        }
 608
 609        /* The address is OK. */
 610        return 0;
 611
 612address_error:
 613        /* We can't support this address. Unmap it again. */
 614        unmap_descbuffer(ring, addr, buffersize, dma_to_device);
 615
 616        return 1;
 617}
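/*
 * A nonzero return from b43legacy_dma_mapping_error() means the buffer
 * is unusable for this ring's addressing limits; callers retry with a
 * GFP_DMA allocation or a bounce buffer, as setup_rx_descbuffer(),
 * dma_tx_fragment() and the txhdr_cache probe in
 * b43legacy_setup_dmaring() do.
 */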
 618
 619static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
 620                               struct b43legacy_dmadesc_generic *desc,
 621                               struct b43legacy_dmadesc_meta *meta,
 622                               gfp_t gfp_flags)
 623{
 624        struct b43legacy_rxhdr_fw3 *rxhdr;
 625        struct b43legacy_hwtxstatus *txstat;
 626        dma_addr_t dmaaddr;
 627        struct sk_buff *skb;
 628
 629        B43legacy_WARN_ON(ring->tx);
 630
 631        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
 632        if (unlikely(!skb))
 633                return -ENOMEM;
 634        dmaaddr = map_descbuffer(ring, skb->data,
 635                                 ring->rx_buffersize, 0);
 636        if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 637                /* ugh. try to realloc in zone_dma */
 638                gfp_flags |= GFP_DMA;
 639
 640                dev_kfree_skb_any(skb);
 641
 642                skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
 643                if (unlikely(!skb))
 644                        return -ENOMEM;
 645                dmaaddr = map_descbuffer(ring, skb->data,
 646                                         ring->rx_buffersize, 0);
 647        }
 648
 649        if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 650                dev_kfree_skb_any(skb);
 651                return -EIO;
 652        }
 653
 654        meta->skb = skb;
 655        meta->dmaaddr = dmaaddr;
 656        ring->ops->fill_descriptor(ring, desc, dmaaddr,
 657                                   ring->rx_buffersize, 0, 0, 0);
 658
 659        rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
 660        rxhdr->frame_len = 0;
 661        txstat = (struct b43legacy_hwtxstatus *)(skb->data);
 662        txstat->cookie = 0;
 663
 664        return 0;
 665}
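/*
 * Clearing rxhdr->frame_len and txstat->cookie above marks a freshly
 * mapped buffer as not yet written by the device; this appears to be
 * how the RX completion path later tells an empty buffer from one the
 * hardware has filled in.
 */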
 666
 667/* Allocate the initial descbuffers.
 668 * This is used for an RX ring only.
 669 */
 670static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
 671{
 672        int i;
 673        int err = -ENOMEM;
 674        struct b43legacy_dmadesc_generic *desc;
 675        struct b43legacy_dmadesc_meta *meta;
 676
 677        for (i = 0; i < ring->nr_slots; i++) {
 678                desc = ring->ops->idx2desc(ring, i, &meta);
 679
 680                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
 681                if (err) {
 682                        b43legacyerr(ring->dev->wl,
 683                               "Failed to allocate initial descbuffers\n");
 684                        goto err_unwind;
 685                }
 686        }
 687        mb(); /* all descbuffer setup before next line */
 688        ring->used_slots = ring->nr_slots;
 689        err = 0;
 690out:
 691        return err;
 692
 693err_unwind:
 694        for (i--; i >= 0; i--) {
 695                desc = ring->ops->idx2desc(ring, i, &meta);
 696
 697                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
 698                dev_kfree_skb(meta->skb);
 699        }
 700        goto out;
 701}
 702
 703/* Do initial setup of the DMA controller.
 704 * Reset the controller, write the ring busaddress
 705 * and switch the "enable" bit on.
 706 */
 707static int dmacontroller_setup(struct b43legacy_dmaring *ring)
 708{
 709        int err = 0;
 710        u32 value;
 711        u32 addrext;
 712        u32 trans = ssb_dma_translation(ring->dev->dev);
 713
 714        if (ring->tx) {
 715                if (ring->type == B43legacy_DMA_64BIT) {
 716                        u64 ringbase = (u64)(ring->dmabase);
 717
 718                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
 719                                  >> SSB_DMA_TRANSLATION_SHIFT;
 720                        value = B43legacy_DMA64_TXENABLE;
 721                        value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT)
 722                                & B43legacy_DMA64_TXADDREXT_MASK;
 723                        b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
 724                                            value);
 725                        b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO,
 726                                            (ringbase & 0xFFFFFFFF));
 727                        b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI,
 728                                            ((ringbase >> 32)
 729                                            & ~SSB_DMA_TRANSLATION_MASK)
 730                                            | trans);
 731                } else {
 732                        u32 ringbase = (u32)(ring->dmabase);
 733
 734                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
 735                                  >> SSB_DMA_TRANSLATION_SHIFT;
 736                        value = B43legacy_DMA32_TXENABLE;
 737                        value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
 738                                & B43legacy_DMA32_TXADDREXT_MASK;
 739                        b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
 740                                            value);
 741                        b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
 742                                            (ringbase &
 743                                            ~SSB_DMA_TRANSLATION_MASK)
 744                                            | trans);
 745                }
 746        } else {
 747                err = alloc_initial_descbuffers(ring);
 748                if (err)
 749                        goto out;
 750                if (ring->type == B43legacy_DMA_64BIT) {
 751                        u64 ringbase = (u64)(ring->dmabase);
 752
 753                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
 754                                  >> SSB_DMA_TRANSLATION_SHIFT;
 755                        value = (ring->frameoffset <<
 756                                 B43legacy_DMA64_RXFROFF_SHIFT);
 757                        value |= B43legacy_DMA64_RXENABLE;
 758                        value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT)
 759                                & B43legacy_DMA64_RXADDREXT_MASK;
 760                        b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL,
 761                                            value);
 762                        b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO,
 763                                            (ringbase & 0xFFFFFFFF));
 764                        b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI,
 765                                            ((ringbase >> 32) &
 766                                            ~SSB_DMA_TRANSLATION_MASK) |
 767                                            trans);
 768                        b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
 769                                            200);
 770                } else {
 771                        u32 ringbase = (u32)(ring->dmabase);
 772
 773                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
 774                                  >> SSB_DMA_TRANSLATION_SHIFT;
 775                        value = (ring->frameoffset <<
 776                                 B43legacy_DMA32_RXFROFF_SHIFT);
 777                        value |= B43legacy_DMA32_RXENABLE;
 778                        value |= (addrext <<
 779                                 B43legacy_DMA32_RXADDREXT_SHIFT)
 780                                 & B43legacy_DMA32_RXADDREXT_MASK;
 781                        b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL,
 782                                            value);
 783                        b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
 784                                            (ringbase &
 785                                            ~SSB_DMA_TRANSLATION_MASK)
 786                                            | trans);
 787                        b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
 788                                            200);
 789                }
 790        }
 791
 792out:
 793        return err;
 794}
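/*
 * For TX rings only the control word and ring base are programmed; RX
 * rings additionally get all their descbuffers allocated up front, the
 * frame offset written into the control word and an initial RX index,
 * so the controller can start filling buffers as soon as the enable
 * bit is set.
 */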
 795
 796/* Shutdown the DMA controller. */
 797static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
 798{
 799        if (ring->tx) {
 800                b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
 801                                                 ring->type);
 802                if (ring->type == B43legacy_DMA_64BIT) {
 803                        b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
 804                        b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
 805                } else
 806                        b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
 807        } else {
 808                b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
 809                                                 ring->type);
 810                if (ring->type == B43legacy_DMA_64BIT) {
 811                        b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
 812                        b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
 813                } else
 814                        b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
 815        }
 816}
 817
 818static void free_all_descbuffers(struct b43legacy_dmaring *ring)
 819{
 820        struct b43legacy_dmadesc_generic *desc;
 821        struct b43legacy_dmadesc_meta *meta;
 822        int i;
 823
 824        if (!ring->used_slots)
 825                return;
 826        for (i = 0; i < ring->nr_slots; i++) {
 827                desc = ring->ops->idx2desc(ring, i, &meta);
 828
 829                if (!meta->skb) {
 830                        B43legacy_WARN_ON(!ring->tx);
 831                        continue;
 832                }
 833                if (ring->tx)
 834                        unmap_descbuffer(ring, meta->dmaaddr,
 835                                         meta->skb->len, 1);
 836                else
 837                        unmap_descbuffer(ring, meta->dmaaddr,
 838                                         ring->rx_buffersize, 0);
 839                free_descriptor_buffer(ring, meta, 0);
 840        }
 841}
 842
 843static u64 supported_dma_mask(struct b43legacy_wldev *dev)
 844{
 845        u32 tmp;
 846        u16 mmio_base;
 847
 848        tmp = b43legacy_read32(dev, SSB_TMSHIGH);
 849        if (tmp & SSB_TMSHIGH_DMA64)
 850                return DMA_BIT_MASK(64);
 851        mmio_base = b43legacy_dmacontroller_base(0, 0);
 852        b43legacy_write32(dev,
 853                        mmio_base + B43legacy_DMA32_TXCTL,
 854                        B43legacy_DMA32_TXADDREXT_MASK);
 855        tmp = b43legacy_read32(dev, mmio_base +
 856                               B43legacy_DMA32_TXCTL);
 857        if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
 858                return DMA_BIT_MASK(32);
 859
 860        return DMA_BIT_MASK(30);
 861}
 862
 863static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
 864{
 865        if (dmamask == DMA_BIT_MASK(30))
 866                return B43legacy_DMA_30BIT;
 867        if (dmamask == DMA_BIT_MASK(32))
 868                return B43legacy_DMA_32BIT;
 869        if (dmamask == DMA_BIT_MASK(64))
 870                return B43legacy_DMA_64BIT;
 871        B43legacy_WARN_ON(1);
 872        return B43legacy_DMA_30BIT;
 873}
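/*
 * supported_dma_mask() probes the hardware (the SSB_TMSHIGH_DMA64 flag,
 * then whether the 32-bit ADDREXT bits stick in TXCTL) and this helper
 * maps the resulting mask back onto the engine type that selects the
 * register layout and ops table for each ring.
 */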
 874
 875/* Main initialization function. */
 876static
 877struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 878                                                  int controller_index,
 879                                                  int for_tx,
 880                                                  enum b43legacy_dmatype type)
 881{
 882        struct b43legacy_dmaring *ring;
 883        int err;
 884        int nr_slots;
 885        dma_addr_t dma_test;
 886
 887        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 888        if (!ring)
 889                goto out;
 890        ring->type = type;
 891        ring->dev = dev;
 892
 893        nr_slots = B43legacy_RXRING_SLOTS;
 894        if (for_tx)
 895                nr_slots = B43legacy_TXRING_SLOTS;
 896
 897        ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
 898                             GFP_KERNEL);
 899        if (!ring->meta)
 900                goto err_kfree_ring;
 901        if (for_tx) {
 902                ring->txhdr_cache = kcalloc(nr_slots,
 903                                        sizeof(struct b43legacy_txhdr_fw3),
 904                                        GFP_KERNEL);
 905                if (!ring->txhdr_cache)
 906                        goto err_kfree_meta;
 907
 908                /* test for ability to dma to txhdr_cache */
 909                dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
 910                                              sizeof(struct b43legacy_txhdr_fw3),
 911                                              DMA_TO_DEVICE);
 912
 913                if (b43legacy_dma_mapping_error(ring, dma_test,
 914                                        sizeof(struct b43legacy_txhdr_fw3), 1)) {
 915                        /* ugh realloc */
 916                        kfree(ring->txhdr_cache);
 917                        ring->txhdr_cache = kcalloc(nr_slots,
 918                                        sizeof(struct b43legacy_txhdr_fw3),
 919                                        GFP_KERNEL | GFP_DMA);
 920                        if (!ring->txhdr_cache)
 921                                goto err_kfree_meta;
 922
 923                        dma_test = dma_map_single(dev->dev->dma_dev,
 924                                        ring->txhdr_cache,
 925                                        sizeof(struct b43legacy_txhdr_fw3),
 926                                        DMA_TO_DEVICE);
 927
 928                        if (b43legacy_dma_mapping_error(ring, dma_test,
 929                                        sizeof(struct b43legacy_txhdr_fw3), 1))
 930                                goto err_kfree_txhdr_cache;
 931                }
 932
 933                dma_unmap_single(dev->dev->dma_dev, dma_test,
 934                                 sizeof(struct b43legacy_txhdr_fw3),
 935                                 DMA_TO_DEVICE);
 936        }
 937
 938        ring->nr_slots = nr_slots;
 939        ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
 940        ring->index = controller_index;
 941        if (type == B43legacy_DMA_64BIT)
 942                ring->ops = &dma64_ops;
 943        else
 944                ring->ops = &dma32_ops;
 945        if (for_tx) {
 946                ring->tx = 1;
 947                ring->current_slot = -1;
 948        } else {
 949                if (ring->index == 0) {
 950                        ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
 951                        ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
 952                } else if (ring->index == 3) {
 953                        ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
 954                        ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
 955                } else
 956                        B43legacy_WARN_ON(1);
 957        }
 958        spin_lock_init(&ring->lock);
 959#ifdef CONFIG_B43LEGACY_DEBUG
 960        ring->last_injected_overflow = jiffies;
 961#endif
 962
 963        err = alloc_ringmemory(ring);
 964        if (err)
 965                goto err_kfree_txhdr_cache;
 966        err = dmacontroller_setup(ring);
 967        if (err)
 968                goto err_free_ringmemory;
 969
 970out:
 971        return ring;
 972
 973err_free_ringmemory:
 974        free_ringmemory(ring);
 975err_kfree_txhdr_cache:
 976        kfree(ring->txhdr_cache);
 977err_kfree_meta:
 978        kfree(ring->meta);
 979err_kfree_ring:
 980        kfree(ring);
 981        ring = NULL;
 982        goto out;
 983}
 984
 985/* Main cleanup function. */
 986static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
 987{
 988        if (!ring)
 989                return;
 990
 991        b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
 992                     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
 993                     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
 994                     ring->nr_slots);
  995        /* Device IRQs are disabled prior to entering this function,
 996         * so no need to take care of concurrency with rx handler stuff.
 997         */
 998        dmacontroller_cleanup(ring);
 999        free_all_descbuffers(ring);
1000        free_ringmemory(ring);
1001
1002        kfree(ring->txhdr_cache);
1003        kfree(ring->meta);
1004        kfree(ring);
1005}
1006
1007void b43legacy_dma_free(struct b43legacy_wldev *dev)
1008{
1009        struct b43legacy_dma *dma;
1010
1011        if (b43legacy_using_pio(dev))
1012                return;
1013        dma = &dev->dma;
1014
1015        b43legacy_destroy_dmaring(dma->rx_ring3);
1016        dma->rx_ring3 = NULL;
1017        b43legacy_destroy_dmaring(dma->rx_ring0);
1018        dma->rx_ring0 = NULL;
1019
1020        b43legacy_destroy_dmaring(dma->tx_ring5);
1021        dma->tx_ring5 = NULL;
1022        b43legacy_destroy_dmaring(dma->tx_ring4);
1023        dma->tx_ring4 = NULL;
1024        b43legacy_destroy_dmaring(dma->tx_ring3);
1025        dma->tx_ring3 = NULL;
1026        b43legacy_destroy_dmaring(dma->tx_ring2);
1027        dma->tx_ring2 = NULL;
1028        b43legacy_destroy_dmaring(dma->tx_ring1);
1029        dma->tx_ring1 = NULL;
1030        b43legacy_destroy_dmaring(dma->tx_ring0);
1031        dma->tx_ring0 = NULL;
1032}
1033
1034static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
1035{
1036        u64 orig_mask = mask;
1037        bool fallback = 0;
1038        int err;
1039
1040        /* Try to set the DMA mask. If it fails, try falling back to a
1041         * lower mask, as we can always also support a lower one. */
1042        while (1) {
1043                err = dma_set_mask(dev->dev->dma_dev, mask);
1044                if (!err) {
1045                        err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
1046                        if (!err)
1047                                break;
1048                }
1049                if (mask == DMA_BIT_MASK(64)) {
1050                        mask = DMA_BIT_MASK(32);
1051                        fallback = 1;
1052                        continue;
1053                }
1054                if (mask == DMA_BIT_MASK(32)) {
1055                        mask = DMA_BIT_MASK(30);
1056                        fallback = 1;
1057                        continue;
1058                }
1059                b43legacyerr(dev->wl, "The machine/kernel does not support "
1060                       "the required %u-bit DMA mask\n",
1061                       (unsigned int)dma_mask_to_engine_type(orig_mask));
1062                return -EOPNOTSUPP;
1063        }
1064        if (fallback) {
1065                b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
1066                        "bit\n",
1067                        (unsigned int)dma_mask_to_engine_type(orig_mask),
1068                        (unsigned int)dma_mask_to_engine_type(mask));
1069        }
1070
1071        return 0;
1072}
1073
1074int b43legacy_dma_init(struct b43legacy_wldev *dev)
1075{
1076        struct b43legacy_dma *dma = &dev->dma;
1077        struct b43legacy_dmaring *ring;
1078        int err;
1079        u64 dmamask;
1080        enum b43legacy_dmatype type;
1081
1082        dmamask = supported_dma_mask(dev);
1083        type = dma_mask_to_engine_type(dmamask);
1084        err = b43legacy_dma_set_mask(dev, dmamask);
1085        if (err) {
1086#ifdef CONFIG_B43LEGACY_PIO
1087                b43legacywarn(dev->wl, "DMA for this device not supported. "
1088                        "Falling back to PIO\n");
1089                dev->__using_pio = 1;
1090                return -EAGAIN;
1091#else
1092                b43legacyerr(dev->wl, "DMA for this device not supported and "
1093                       "no PIO support compiled in\n");
1094                return -EOPNOTSUPP;
1095#endif
1096        }
1097
1098        err = -ENOMEM;
1099        /* setup TX DMA channels. */
1100        ring = b43legacy_setup_dmaring(dev, 0, 1, type);
1101        if (!ring)
1102                goto out;
1103        dma->tx_ring0 = ring;
1104
1105        ring = b43legacy_setup_dmaring(dev, 1, 1, type);
1106        if (!ring)
1107                goto err_destroy_tx0;
1108        dma->tx_ring1 = ring;
1109
1110        ring = b43legacy_setup_dmaring(dev, 2, 1, type);
1111        if (!ring)
1112                goto err_destroy_tx1;
1113        dma->tx_ring2 = ring;
1114
1115        ring = b43legacy_setup_dmaring(dev, 3, 1, type);
1116        if (!ring)
1117                goto err_destroy_tx2;
1118        dma->tx_ring3 = ring;
1119
1120        ring = b43legacy_setup_dmaring(dev, 4, 1, type);
1121        if (!ring)
1122                goto err_destroy_tx3;
1123        dma->tx_ring4 = ring;
1124
1125        ring = b43legacy_setup_dmaring(dev, 5, 1, type);
1126        if (!ring)
1127                goto err_destroy_tx4;
1128        dma->tx_ring5 = ring;
1129
1130        /* setup RX DMA channels. */
1131        ring = b43legacy_setup_dmaring(dev, 0, 0, type);
1132        if (!ring)
1133                goto err_destroy_tx5;
1134        dma->rx_ring0 = ring;
1135
1136        if (dev->dev->id.revision < 5) {
1137                ring = b43legacy_setup_dmaring(dev, 3, 0, type);
1138                if (!ring)
1139                        goto err_destroy_rx0;
1140                dma->rx_ring3 = ring;
1141        }
1142
1143        b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
1144        err = 0;
1145out:
1146        return err;
1147
1148err_destroy_rx0:
1149        b43legacy_destroy_dmaring(dma->rx_ring0);
1150        dma->rx_ring0 = NULL;
1151err_destroy_tx5:
1152        b43legacy_destroy_dmaring(dma->tx_ring5);
1153        dma->tx_ring5 = NULL;
1154err_destroy_tx4:
1155        b43legacy_destroy_dmaring(dma->tx_ring4);
1156        dma->tx_ring4 = NULL;
1157err_destroy_tx3:
1158        b43legacy_destroy_dmaring(dma->tx_ring3);
1159        dma->tx_ring3 = NULL;
1160err_destroy_tx2:
1161        b43legacy_destroy_dmaring(dma->tx_ring2);
1162        dma->tx_ring2 = NULL;
1163err_destroy_tx1:
1164        b43legacy_destroy_dmaring(dma->tx_ring1);
1165        dma->tx_ring1 = NULL;
1166err_destroy_tx0:
1167        b43legacy_destroy_dmaring(dma->tx_ring0);
1168        dma->tx_ring0 = NULL;
1169        goto out;
1170}
1171
1172/* Generate a cookie for the TX header. */
1173static u16 generate_cookie(struct b43legacy_dmaring *ring,
1174                           int slot)
1175{
1176        u16 cookie = 0x1000;
1177
1178        /* Use the upper 4 bits of the cookie as
1179         * DMA controller ID and store the slot number
1180         * in the lower 12 bits.
1181         * Note that the cookie must never be 0, as this
1182         * is a special value used in RX path.
1183         */
1184        switch (ring->index) {
1185        case 0:
1186                cookie = 0xA000;
1187                break;
1188        case 1:
1189                cookie = 0xB000;
1190                break;
1191        case 2:
1192                cookie = 0xC000;
1193                break;
1194        case 3:
1195                cookie = 0xD000;
1196                break;
1197        case 4:
1198                cookie = 0xE000;
1199                break;
1200        case 5:
1201                cookie = 0xF000;
1202                break;
1203        }
1204        B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
1205        cookie |= (u16)slot;
1206
1207        return cookie;
1208}
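/*
 * Cookie layout example: ring index 2 (tx_ring2), slot 5 encodes as
 * 0xC005. The high nibble (0xA..0xF for rings 0..5) selects the
 * controller and the low 12 bits carry the slot; parse_cookie() below
 * reverses the mapping.
 */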
1209
1210/* Inspect a cookie and find out to which controller/slot it belongs. */
1211static
1212struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
1213                                      u16 cookie, int *slot)
1214{
1215        struct b43legacy_dma *dma = &dev->dma;
1216        struct b43legacy_dmaring *ring = NULL;
1217
1218        switch (cookie & 0xF000) {
1219        case 0xA000:
1220                ring = dma->tx_ring0;
1221                break;
1222        case 0xB000:
1223                ring = dma->tx_ring1;
1224                break;
1225        case 0xC000:
1226                ring = dma->tx_ring2;
1227                break;
1228        case 0xD000:
1229                ring = dma->tx_ring3;
1230                break;
1231        case 0xE000:
1232                ring = dma->tx_ring4;
1233                break;
1234        case 0xF000:
1235                ring = dma->tx_ring5;
1236                break;
1237        default:
1238                B43legacy_WARN_ON(1);
1239        }
1240        *slot = (cookie & 0x0FFF);
1241        B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
1242
1243        return ring;
1244}
1245
1246static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1247                            struct sk_buff **in_skb)
1248{
1249        struct sk_buff *skb = *in_skb;
1250        const struct b43legacy_dma_ops *ops = ring->ops;
1251        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1252        u8 *header;
1253        int slot, old_top_slot, old_used_slots;
1254        int err;
1255        struct b43legacy_dmadesc_generic *desc;
1256        struct b43legacy_dmadesc_meta *meta;
1257        struct b43legacy_dmadesc_meta *meta_hdr;
1258        struct sk_buff *bounce_skb;
1259
1260#define SLOTS_PER_PACKET  2
1261        B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
1262
1263        old_top_slot = ring->current_slot;
1264        old_used_slots = ring->used_slots;
1265
1266        /* Get a slot for the header. */
1267        slot = request_slot(ring);
1268        desc = ops->idx2desc(ring, slot, &meta_hdr);
1269        memset(meta_hdr, 0, sizeof(*meta_hdr));
1270
1271        header = &(ring->txhdr_cache[slot * sizeof(
1272                               struct b43legacy_txhdr_fw3)]);
1273        err = b43legacy_generate_txhdr(ring->dev, header,
1274                                 skb->data, skb->len, info,
1275                                 generate_cookie(ring, slot));
1276        if (unlikely(err)) {
1277                ring->current_slot = old_top_slot;
1278                ring->used_slots = old_used_slots;
1279                return err;
1280        }
1281
1282        meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
1283                                           sizeof(struct b43legacy_txhdr_fw3), 1);
1284        if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
1285                                        sizeof(struct b43legacy_txhdr_fw3), 1)) {
1286                ring->current_slot = old_top_slot;
1287                ring->used_slots = old_used_slots;
1288                return -EIO;
1289        }
1290        ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
1291                             sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);
1292
1293        /* Get a slot for the payload. */
1294        slot = request_slot(ring);
1295        desc = ops->idx2desc(ring, slot, &meta);
1296        memset(meta, 0, sizeof(*meta));
1297
1298        meta->skb = skb;
1299        meta->is_last_fragment = 1;
1300
1301        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1302        /* create a bounce buffer in zone_dma on mapping failure. */
1303        if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1304                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
1305                if (!bounce_skb) {
1306                        ring->current_slot = old_top_slot;
1307                        ring->used_slots = old_used_slots;
1308                        err = -ENOMEM;
1309                        goto out_unmap_hdr;
1310                }
1311
1312                memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
1313                memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
1314                bounce_skb->dev = skb->dev;
1315                skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
1316                info = IEEE80211_SKB_CB(bounce_skb);
1317
1318                dev_kfree_skb_any(skb);
1319                skb = bounce_skb;
1320                *in_skb = bounce_skb;
1321                meta->skb = skb;
1322                meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1323                if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1324                        ring->current_slot = old_top_slot;
1325                        ring->used_slots = old_used_slots;
1326                        err = -EIO;
1327                        goto out_free_bounce;
1328                }
1329        }
1330
1331        ops->fill_descriptor(ring, desc, meta->dmaaddr,
1332                             skb->len, 0, 1, 1);
1333
1334        wmb();  /* previous stuff MUST be done */
1335        /* Now transfer the whole frame. */
1336        ops->poke_tx(ring, next_slot(ring, slot));
1337        return 0;
1338
1339out_free_bounce:
1340        dev_kfree_skb_any(skb);
1341out_unmap_hdr:
1342        unmap_descbuffer(ring, meta_hdr->dmaaddr,
1343                         sizeof(struct b43legacy_txhdr_fw3), 1);
1344        return err;
1345}
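/*
 * Each frame consumes SLOTS_PER_PACKET (2) slots: one descriptor for
 * the cached firmware TX header and one for the skb payload. Only the
 * payload descriptor has FRAMEEND and IRQ set, so a single interrupt
 * is raised per transmitted frame.
 */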
1346
1347static inline
1348int should_inject_overflow(struct b43legacy_dmaring *ring)
1349{
1350#ifdef CONFIG_B43LEGACY_DEBUG
1351        if (unlikely(b43legacy_debug(ring->dev,
1352                                     B43legacy_DBG_DMAOVERFLOW))) {
1353                /* Check if we should inject another ringbuffer overflow
1354                 * to test handling of this situation in the stack. */
1355                unsigned long next_overflow;
1356
1357                next_overflow = ring->last_injected_overflow + HZ;
1358                if (time_after(jiffies, next_overflow)) {
1359                        ring->last_injected_overflow = jiffies;
1360                        b43legacydbg(ring->dev->wl,
1361                               "Injecting TX ring overflow on "
1362                               "DMA controller %d\n", ring->index);
1363                        return 1;
1364                }
1365        }
1366#endif /* CONFIG_B43LEGACY_DEBUG */
1367        return 0;
1368}
1369
1370int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1371                     struct sk_buff *skb)
1372{
1373        struct b43legacy_dmaring *ring;
1374        struct ieee80211_hdr *hdr;
1375        int err = 0;
1376        unsigned long flags;
1377        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1378
1379        ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1380        spin_lock_irqsave(&ring->lock, flags);
1381        B43legacy_WARN_ON(!ring->tx);
1382
1383        if (unlikely(ring->stopped)) {
1384                /* We get here only because of a bug in mac80211.
1385                 * Because of a race, one packet may be queued after
1386                 * the queue is stopped, thus we got called when we shouldn't.
1387                 * For now, just refuse the transmit. */
1388                if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1389                        b43legacyerr(dev->wl, "Packet after queue stopped\n");
1390                err = -ENOSPC;
1391                goto out_unlock;
1392        }
1393
1394        if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
1395                /* If we get here, we have a real error with the queue
1396                 * full, but queues not stopped. */
1397                b43legacyerr(dev->wl, "DMA queue overflow\n");
1398                err = -ENOSPC;
1399                goto out_unlock;
1400        }
1401
1402        /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
1403         * into the skb data or cb now. */
1404        hdr = NULL;
1405        info = NULL;
1406        err = dma_tx_fragment(ring, &skb);
1407        if (unlikely(err == -ENOKEY)) {
1408                /* Drop this packet, as we don't have the encryption key
1409                 * anymore and must not transmit it unencrypted. */
1410                dev_kfree_skb_any(skb);
1411                err = 0;
1412                goto out_unlock;
1413        }
1414        if (unlikely(err)) {
1415                b43legacyerr(dev->wl, "DMA tx mapping failure\n");
1416                goto out_unlock;
1417        }
1418        if ((free_slots(ring) < SLOTS_PER_PACKET) ||
1419            should_inject_overflow(ring)) {
1420                /* This TX ring is full. */
1421                ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
1422                ring->stopped = 1;
1423                if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1424                        b43legacydbg(dev->wl, "Stopped TX ring %d\n",
1425                               ring->index);
1426        }
1427out_unlock:
1428        spin_unlock_irqrestore(&ring->lock, flags);
1429
1430        return err;
1431}
1432
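    /*
     * Handle a TX status report from the device: look up the ring and
     * slot from the cookie, unmap every descriptor of the frame, fill in
     * ACK and retry information for mac80211, hand the skb back via
     * ieee80211_tx_status_irqsafe() and wake the queue if it had been
     * stopped.
     */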
1433void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1434                                 const struct b43legacy_txstatus *status)
1435{
1436        const struct b43legacy_dma_ops *ops;
1437        struct b43legacy_dmaring *ring;
1438        struct b43legacy_dmadesc_generic *desc;
1439        struct b43legacy_dmadesc_meta *meta;
1440        int retry_limit;
1441        int slot;
1442
1443        ring = parse_cookie(dev, status->cookie, &slot);
1444        if (unlikely(!ring))
1445                return;
1446        B43legacy_WARN_ON(!irqs_disabled());
1447        spin_lock(&ring->lock);
1448
1449        B43legacy_WARN_ON(!ring->tx);
1450        ops = ring->ops;
1451        while (1) {
1452                B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
1453                desc = ops->idx2desc(ring, slot, &meta);
1454
1455                if (meta->skb)
1456                        unmap_descbuffer(ring, meta->dmaaddr,
1457                                         meta->skb->len, 1);
1458                else
1459                        unmap_descbuffer(ring, meta->dmaaddr,
1460                                         sizeof(struct b43legacy_txhdr_fw3),
1461                                         1);
1462
1463                if (meta->is_last_fragment) {
1464                        struct ieee80211_tx_info *info;
1465                        BUG_ON(!meta->skb);
1466                        info = IEEE80211_SKB_CB(meta->skb);
1467
1468                        /* Preserve the configured retry limit before clearing the status.
1469                         * The xmit function has overwritten the rc's value with the actual
1470                         * retry limit done by the hardware. */
1471                        retry_limit = info->status.rates[0].count;
1472                        ieee80211_tx_info_clear_status(info);
1473
1474                        if (status->acked)
1475                                info->flags |= IEEE80211_TX_STAT_ACK;
1476
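                            /* The branches below split the hardware's attempt count
                             * between the selected rate and the fallback rate. Worked
                             * example with illustrative numbers: retry limit 4 and
                             * frame_count 7 means the fallback was used, so
                             * rates[0].count = 4 and rates[1].count = 7 - 4 = 3; with
                             * frame_count <= 4 all attempts are charged to rates[0]
                             * and rates[1] is disabled (idx = -1). */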
1477                        if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
1478                                /*
1479                                 * If the short retries (RTS, not data frame) have exceeded
1480                                 * the limit, the hw will not have tried the selected rate,
1481                                 * but will have used the fallback rate instead.
1482                                 * Don't let the rate control count attempts for the selected
1483                                 * rate in this case, otherwise the statistics will be off.
1484                                 */
1485                                info->status.rates[0].count = 0;
1486                                info->status.rates[1].count = status->frame_count;
1487                        } else {
1488                                if (status->frame_count > retry_limit) {
1489                                        info->status.rates[0].count = retry_limit;
1490                                        info->status.rates[1].count = status->frame_count -
1491                                                        retry_limit;
1492
1493                                } else {
1494                                        info->status.rates[0].count = status->frame_count;
1495                                        info->status.rates[1].idx = -1;
1496                                }
1497                        }
1498
1499                        /* Call back to inform the ieee80211 subsystem about the
1500                         * status of the transmission; some status fields may
1501                         * already have been filled in by the TX path.
1502                         */
1503                        ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1504                        /* skb is freed by ieee80211_tx_status_irqsafe() */
1505                        meta->skb = NULL;
1506                } else {
1507                        /* No need to call free_descriptor_buffer here, as
1508                         * this is only the txhdr, which is not separately allocated.
1509                         */
1510                        B43legacy_WARN_ON(meta->skb != NULL);
1511                }
1512
1513                /* Everything is unmapped and freed, so this slot is no longer in use. */
1514                ring->used_slots--;
1515
1516                if (meta->is_last_fragment)
1517                        break;
1518                slot = next_slot(ring, slot);
1519        }
1520        dev->stats.last_tx = jiffies;
1521        if (ring->stopped) {
1522                B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
1523                ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
1524                ring->stopped = 0;
1525                if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1526                        b43legacydbg(dev->wl, "Woke up TX ring %d\n",
1527                               ring->index);
1528        }
1529
1530        spin_unlock(&ring->lock);
1531}
1532
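    /*
     * Process one RX descriptor. Ring 3 carries hardware TX status
     * reports rather than frames; for all other rings a fresh buffer is
     * attached to the slot and the received frame is passed up through
     * b43legacy_rx().
     */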
1533static void dma_rx(struct b43legacy_dmaring *ring,
1534                   int *slot)
1535{
1536        const struct b43legacy_dma_ops *ops = ring->ops;
1537        struct b43legacy_dmadesc_generic *desc;
1538        struct b43legacy_dmadesc_meta *meta;
1539        struct b43legacy_rxhdr_fw3 *rxhdr;
1540        struct sk_buff *skb;
1541        u16 len;
1542        int err;
1543        dma_addr_t dmaaddr;
1544
1545        desc = ops->idx2desc(ring, *slot, &meta);
1546
1547        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1548        skb = meta->skb;
1549
1550        if (ring->index == 3) {
1551                /* We received an xmit status. */
1552                struct b43legacy_hwtxstatus *hw =
1553                                (struct b43legacy_hwtxstatus *)skb->data;
1554                int i = 0;
1555
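                    /* The device may still be writing the status block when we
                     * get here; poll the cookie for up to ~200us (100 iterations
                     * of udelay(2)) before handing it on regardless. */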
1556                while (hw->cookie == 0) {
1557                        if (i > 100)
1558                                break;
1559                        i++;
1560                        udelay(2);
1561                        barrier();
1562                }
1563                b43legacy_handle_hwtxstatus(ring->dev, hw);
1564                /* recycle the descriptor buffer. */
1565                sync_descbuffer_for_device(ring, meta->dmaaddr,
1566                                           ring->rx_buffersize);
1567
1568                return;
1569        }
1570        rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
1571        len = le16_to_cpu(rxhdr->frame_len);
1572        if (len == 0) {
1573                int i = 0;
1574
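                    /* A zero length most likely means the header has not been
                     * written back yet; re-read it a few times before giving up
                     * and dropping the buffer. */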
1575                do {
1576                        udelay(2);
1577                        barrier();
1578                        len = le16_to_cpu(rxhdr->frame_len);
1579                } while (len == 0 && i++ < 5);
1580                if (unlikely(len == 0)) {
1581                        /* recycle the descriptor buffer. */
1582                        sync_descbuffer_for_device(ring, meta->dmaaddr,
1583                                                   ring->rx_buffersize);
1584                        goto drop;
1585                }
1586        }
1587        if (unlikely(len > ring->rx_buffersize)) {
1588                /* The data did not fit into one descriptor buffer
1589                 * and is split over multiple buffers.
1590                 * This should never happen, as we try to allocate buffers
1591                 * big enough. So simply ignore this packet.
1592                 */
1593                int cnt = 0;
1594                s32 tmp = len;
1595
1596                while (1) {
1597                        desc = ops->idx2desc(ring, *slot, &meta);
1598                        /* recycle the descriptor buffer. */
1599                        sync_descbuffer_for_device(ring, meta->dmaaddr,
1600                                                   ring->rx_buffersize);
1601                        *slot = next_slot(ring, *slot);
1602                        cnt++;
1603                        tmp -= ring->rx_buffersize;
1604                        if (tmp <= 0)
1605                                break;
1606                }
1607                b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
1608                       "(len: %u, buffer: %u, nr-dropped: %d)\n",
1609                       len, ring->rx_buffersize, cnt);
1610                goto drop;
1611        }
1612
1613        dmaaddr = meta->dmaaddr;
1614        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1615        if (unlikely(err)) {
1616                b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
1617                             " failed\n");
1618                sync_descbuffer_for_device(ring, dmaaddr,
1619                                           ring->rx_buffersize);
1620                goto drop;
1621        }
1622
1623        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1624        skb_put(skb, len + ring->frameoffset);
1625        skb_pull(skb, ring->frameoffset);
1626
1627        b43legacy_rx(ring->dev, skb, rxhdr);
1628drop:
1629        return;
1630}
1631
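    /*
     * Drain the RX ring: process every slot between the driver's last
     * position and the slot the hardware is currently filling, then
     * advance both the hardware pointer and ring->current_slot.
     */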
1632void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
1633{
1634        const struct b43legacy_dma_ops *ops = ring->ops;
1635        int slot;
1636        int current_slot;
1637        int used_slots = 0;
1638
1639        B43legacy_WARN_ON(ring->tx);
1640        current_slot = ops->get_current_rxslot(ring);
1641        B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
1642                           ring->nr_slots));
1643
1644        slot = ring->current_slot;
1645        for (; slot != current_slot; slot = next_slot(ring, slot)) {
1646                dma_rx(ring, &slot);
1647                update_max_used_slots(ring, ++used_slots);
1648        }
1649        ops->set_current_rxslot(ring, slot);
1650        ring->current_slot = slot;
1651}
1652
1653static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
1654{
1655        unsigned long flags;
1656
1657        spin_lock_irqsave(&ring->lock, flags);
1658        B43legacy_WARN_ON(!ring->tx);
1659        ring->ops->tx_suspend(ring);
1660        spin_unlock_irqrestore(&ring->lock, flags);
1661}
1662
1663static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
1664{
1665        unsigned long flags;
1666
1667        spin_lock_irqsave(&ring->lock, flags);
1668        B43legacy_WARN_ON(!ring->tx);
1669        ring->ops->tx_resume(ring);
1670        spin_unlock_irqrestore(&ring->lock, flags);
1671}
1672
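    /*
     * Suspend/resume for all six TX DMA controllers. Suspend first
     * adjusts the power-saving control bits (presumably to keep the
     * device awake while TX is halted) and then suspends ring 0 through
     * ring 5; resume walks the rings in the opposite order before
     * restoring the power-saving state.
     */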
1673void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
1674{
1675        b43legacy_power_saving_ctl_bits(dev, -1, 1);
1676        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
1677        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
1678        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
1679        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
1680        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
1681        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
1682}
1683
1684void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
1685{
1686        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
1687        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
1688        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
1689        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
1690        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
1691        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
1692        b43legacy_power_saving_ctl_bits(dev, -1, -1);
1693}
1694