// SPDX-License-Identifier: GPL-2.0-or-later
/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there is
	 * at least 8 bytes of headroom available for the IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Make sure we have the requested number of bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

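/*
 * Buffer-layout illustration (added; sizes hypothetical): assume
 * data_size 2432, desc_size 4 and winfo_size 4 on a device with
 * hardware crypto. __dev_alloc_skb() then gets 2440 + 12 + 8 bytes:
 * skb_reserve() keeps 12 bytes of headroom (4 for payload alignment
 * plus 8 for the IV/EIV), skb_put() claims the 2440-byte frame area,
 * and the remaining 8 bytes stay as tailroom for the ICV.
 */
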
int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

/*
 * H/W needs L2 padding between the header and the payload if the header
 * size is not 4-byte aligned.
 */
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	skb_push(skb, l2pad);
	memmove(skb->data, skb->data + l2pad, hdr_len);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, hdr_len);
	skb_pull(skb, l2pad);
}

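/*
 * Worked example (added for illustration; hypothetical sizes): a QoS
 * data frame has a 26-byte header, so L2PAD_SIZE(26) == 2.
 * rt2x00queue_insert_l2pad() grows the frame by two bytes with
 * skb_push() and memmove()s only the header forward, leaving a 2-byte
 * gap that 4-byte aligns the payload; rt2x00queue_remove_l2pad()
 * reverses this on completion. rt2x00queue_align_frame() instead
 * shifts the whole frame so that skb->data itself becomes 4-byte
 * aligned.
 */
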
static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
		/*
		 * rt2800 has a H/W (or F/W) bug, the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS) and
		 * management frames. To work around the problem, generate
		 * the seqno in software, except for beacons, which are
		 * transmitted periodically by the H/W and hence the hardware
		 * has to assign the seqno for them.
		 */
		if (ieee80211_is_beacon(hdr->frame_control)) {
			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
			/* H/W will generate sequence number */
			return;
		}

		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}

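/*
 * Illustration (added, not from the original source): seq_ctrl packs
 * the 12-bit sequence number in bits 4-15 and the fragment number in
 * bits 0-3. Adding 0x10 to intf->seqno therefore advances the sequence
 * number by exactly one, while the IEEE80211_SCTL_FRAG mask above
 * preserves the fragment bits already present in the header.
 */
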
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When short preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

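/*
 * Worked example (added for illustration; values hypothetical): a
 * 1000-byte MPDU plus 4 bytes of FCS sent at 11 Mbps (hwrate->bitrate
 * == 110 in 100 kbit/s units) needs 1004 * 8 / 11 = 730.18 us, so
 * GET_DURATION() yields 730 and GET_DURATION_RES() yields remainder
 * 20. The nonzero remainder rounds the duration up to 731, and since
 * residual <= 30 at 11 Mbps the CCK Length Extension bit (0x80) in the
 * service field disambiguates the rounded length for the receiver.
 */
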
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;
	u8 density = 0;

	if (sta) {
		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
		density = sta->ht_cap.ampdu_density;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Left zero on all other settings. */
		return;
	}

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
		txdesc->u.ht.mpdu_density = density;
		txdesc->u.ht.ba_size = 7;
	}

	/*
	 * Set 40MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

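/*
 * Background note (added for illustration): HT MCS indexes 0-7 use a
 * single spatial stream while MCS 8-15 use two, which is why the
 * dynamic SM power-save check above only triggers for mcs > 7; a
 * station in dynamic SMPS needs an RTS exchange before multi-stream
 * transmissions may be sent to it.
 */
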
static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which in some way is related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the alloc fails we still send the BAR out but just don't track
	 * it in our bar list. As a result we will report it back to mac80211
	 * as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our descriptor
	 * so we can use it afterwards when the DMA mapping is gone.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	entry->skb = skb;

	/*
	 * It is possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	/*
	 * Pausing the queue has to be serialized with rt2x00lib_txdone(),
	 * hence do it under queue->tx_lock. Bottom halves were already
	 * disabled before the ieee80211_xmit() call.
	 */
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);

	spin_unlock(&queue->tx_lock);
	return ret;
}

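/*
 * Call-flow sketch (added for illustration; simplified): the mac80211
 * .tx handler in rt2x00mac.c reaches this function roughly as
 *
 *	rt2x00mac_tx()
 *	  -> rt2x00queue_write_tx_frame(queue, skb, sta, false)
 *	       -> rt2x00queue_create_tx_descriptor()
 *	       -> rt2x00queue_write_tx_data()    (headroom + DMA map)
 *	       -> rt2x00queue_write_tx_descriptor()
 *	       -> rt2x00queue_kick_tx_queue()
 *
 * while internally generated frames pass local == true so they are
 * marked SKBDESC_NOT_MAC80211 and not reported back to mac80211.
 */
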
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

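/*
 * Usage sketch (added for illustration; the callback name is made up):
 * callers walk the window between two index pointers and stop early by
 * returning true from the callback, e.g.
 *
 *	static bool example_visit_entry(struct queue_entry *entry,
 *					void *data)
 *	{
 *		if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 *			return true;	// stop iterating
 *		// ... per-entry work ...
 *		return false;		// continue with the next entry
 *	}
 *
 *	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *				   NULL, example_visit_entry);
 */
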
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

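/*
 * Illustration (added; values hypothetical): with queue->limit == 64,
 * the index pointers walk 0, 1, ..., 63 and wrap back to 0. Advancing
 * Q_INDEX claims a new entry (queue->length grows), while advancing
 * Q_INDEX_DONE retires one (queue->length shrinks, and queue->count
 * tracks the running total of completed frames).
 */
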
static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

	/*
	 * If we are not supposed to drop any pending
	 * frames, this means we must force a start (=kick)
	 * to the queue to make sure the hardware will
	 * start transmitting.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are also required to stop the RX queue,
	 * and to prevent races we first stop all TX queues inside
	 * mac80211 in one call.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
			QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
						sizeof(*entries), queue->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

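/*
 * Memory-layout note (added for illustration): the kcalloc() above
 * reserves limit * (sizeof(struct queue_entry) + priv_size) bytes in
 * one block. All queue_entry structures are laid out first, followed
 * by one priv_size-sized driver area per entry, so priv_data for
 * entry i lives at base + limit * sizeof(struct queue_entry) +
 * i * priv_size, exactly what QUEUE_ENTRY_PRIV_OFFSET() computes.
 */
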
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
	if (status)
		goto exit;

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;

	rt2x00dev->ops->queue_init(queue);

	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
		rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 *
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

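/*
 * Layout note (added for illustration; tx_queues == 4 is a typical
 * value): the single kcalloc()'d array above is carved up as
 *
 *	queue[0]              RX
 *	queue[1..tx_queues]   TX (QID_AC_VO, VI, BE, BK)
 *	queue[1+tx_queues]    Beacon
 *	queue[2+tx_queues]    ATIM (only when REQUIRE_ATIM_QUEUE is set)
 *
 * which is why rt2x00queue_free() below only needs to kfree() the
 * rt2x00dev->rx pointer.
 */
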
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}