/*
 * Broadcom B43 wireless driver
 *
 * DMA ringbuffer and descriptor allocation/management
 */
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>

/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2


/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
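
/*
 * Note on the address split above: with the usual SSB constants
 * (SSB_DMA_TRANSLATION_MASK == 0xC0000000, shift == 30), a bus address
 * of, say, 0xD0001000 would be programmed as addr = 0x10001000 (ORed
 * with the core's translation bits) and addrext = 0x3. This is an
 * illustrative example, not a value taken from real hardware.
 */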

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
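
/*
 * Everything below accesses the hardware only through one of the two
 * b43_dma_ops vtables above, so the ring management code stays agnostic
 * of the 32-bit vs. 64-bit descriptor format. The proper ops table is
 * selected per ring in b43_setup_dmaring().
 */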

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
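
/*
 * Note that request_slot() itself never fails, so callers must ensure
 * there is room for a whole frame beforehand: dma_tx_fragment() pulls
 * TX_SLOTS_PER_FRAME (2) slots per frame, one for the TX header and one
 * for the frame body, and b43_dma_tx() checks free_slots() first.
 */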

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_TO_DEVICE);
	} else {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_cpu(ring->dev->dev,
				    addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_device(ring->dev->dev,
				       addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* Descriptor rings for 64-bit DMA are allocated from ZONE_DMA to
	 * keep the descriptor table in low memory; some configurations
	 * cannot reliably fetch descriptors from higher addresses. */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
						  B43_DMA_RINGMEMSIZE,
						  &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;

	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
				ring->descbase, ring->dmabase, flags);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	/* First wait for the channel to go idle, then disable it and wait
	 * for it to report the disabled state. */
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}
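
/*
 * RX buffer poisoning: before a buffer is handed to the device, its
 * frame-length field is zeroed and the start of the frame area is
 * filled with 0xFF. If that poison pattern is still intact when the
 * buffer comes back from the hardware, the device never wrote to it,
 * and the buffer is dropped instead of being parsed as a frame.
 */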

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset +
		    sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
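
/*
 * supported_dma_mask() probes the hardware capabilities: the TMSHIGH
 * flag advertises a 64-bit engine; otherwise the address-extension bits
 * are written to the first 32-bit TX control register and read back.
 * If they stick, the engine decodes 32-bit addresses, else only 30-bit.
 */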

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by
		 * TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = ssb_dma_map_single(dev->dev,
					      ring->txhdr_cache,
					      b43_txhdr_size(dev),
					      DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots /
						    TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = ssb_dma_map_single(dev->dev,
						      ring->txhdr_cache,
						      b43_txhdr_size(dev),
						      DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		ssb_dma_unmap_single(dev->dev,
				     dma_test, b43_txhdr_size(dev),
				     DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
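
/*
 * do_div(a, b) divides the 64-bit value in place (a becomes the
 * quotient) and the macro itself evaluates to the remainder. So the
 * divide() statement-expression above yields the quotient, while
 * modulo() yields the remainder.
 */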

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lesser mask, as we can always also support a lesser one. */
	while (1) {
		err = ssb_dma_set_mask(dev->dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
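
/*
 * Example: a frame queued on the ring with index 2 (tx_ring_AC_VI) in
 * slot 5 gets the cookie 0x3005. parse_cookie() below maps the 0x3000
 * nibble back to tx_ring_AC_VI and the low 12 bits back to the slot.
 */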

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
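
/*
 * A TX frame occupies TX_SLOTS_PER_FRAME (2) descriptor slots: the
 * first carries the device TX header (FRAMESTART), the second the
 * actual 802.11 frame (FRAMEEND + IRQ). If the frame body cannot be
 * DMA-mapped, it is copied into a freshly allocated GFP_DMA bounce
 * buffer and mapped again from there.
 */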
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */
	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM. */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got this packet.
		 * Silently drop it. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * management. Our queue management should never fail. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers
	 * pointing into the skb data or cb now. */
	hdr = NULL;
	info = NULL;
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			BUG_ON(!meta->skb);

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb is freed by ieee80211_tx_status */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	int i;

	for (i = 0; i < nr_queues; i++) {
		ring = select_ring_by_priority(dev, i);

		stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
		stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
		stats[i].count = ring->nr_tx_packets;
	}
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		/* The device may not have written the frame length to the
		 * RX header yet. Give it a few microseconds to catch up. */
		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not
		 * overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));
	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}
#endif /* CONFIG_B43_PIO */