/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the
	 * frame into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there is
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Reserve the requested headroom and set the frame size,
	 * leaving head_size bytes of headroom and tail_size bytes
	 * of tailroom around the frame data.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
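
/*
 * For reference, the RX buffer produced above is laid out as follows
 * (the crypto additions only apply when CAPABILITY_HW_CRYPTO is set):
 *
 *   | headroom: 4 (+8 IV/EIV) | data_size + desc_size | tailroom: +8 ICV |
 *
 * The 4 bytes of headroom give the driver room to shift a received
 * frame onto a 4-byte boundary without reallocating the skb.
 */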

void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
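
/*
 * A sketch of what happens above, assuming ALIGN_SIZE(skb, offset) from
 * rt2x00queue.h evaluates to ((unsigned long)((skb)->data + (offset))) & 3,
 * i.e. the misalignment relative to a 4-byte boundary: if skb->data sits
 * at an address with (addr & 3) == 2, then align == 2, skb_push() drops
 * the data pointer 2 bytes down onto the boundary, memmove() slides the
 * frame into place, and skb_trim() restores the original length.
 */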

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed. */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame. */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required. */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size. */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}
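
/*
 * Worked example, assuming the usual L2PAD_SIZE(hdrlen) == (-(hdrlen) & 3)
 * definition from rt2x00queue.h: a QoS data header is 26 bytes, so
 * L2PAD_SIZE(26) == 2 and insertion transforms the buffer as
 *
 *   before: | header (26) | payload ...             |
 *   after:  | header (26) | l2pad (2) | payload ... |
 *
 * which puts the payload back on a 4-byte boundary; removal performs
 * the inverse move before the frame is handed to mac80211.
 */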

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags))
		return;

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock(&intf->seqlock);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock(&intf->seqlock);
}
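
/*
 * Why the 0x10 increment above: the 16-bit IEEE 802.11 Sequence Control
 * field keeps the 4-bit fragment number in bits 0-3 and the 12-bit
 * sequence number in bits 4-15. Adding 0x10 therefore advances the
 * sequence number by exactly one, and masking with IEEE80211_SCTL_FRAG
 * preserves only the fragment bits before the counter is OR'ed back in,
 * so all fragments of an MSDU share one sequence number.
 */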

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC). */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
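
/*
 * Worked example for the CCK branch, assuming the GET_DURATION macros
 * from rt2x00.h compute (len * 8 * 10) / bitrate and its remainder,
 * with bitrate in 100 kbit/s units: for a 1500-byte frame at 11 Mbit/s
 * (bitrate == 110), 1500 * 80 == 120000, so duration == 1090 us with
 * residual == 100. The residual is non-zero, so the LENGTH field is
 * rounded up to 1091 us; since residual > 30 the 802.11b length
 * extension bit (0x80 in SERVICE) stays clear. That bit exists because
 * at 11 Mbit/s several octet counts round to the same microsecond
 * value, and the receiver needs it to recover the exact frame length.
 */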

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;

	if (tx_info->control.sta) {
		txdesc->u.ht.mpdu_density =
		    tx_info->control.sta->ht_cap.ampdu_density;

		sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
	}

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains
	 * the MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic
		 * SM PS when using more than one TX stream (>MCS7).
		 */
		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
		    ((tx_info->control.sta->ht_cap.cap &
		      IEEE80211_HT_CAP_SM_PS) >>
		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
		    WLAN_HT_CAP_SM_PS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP.
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which in some way is related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	skbdesc->entry = entry;
	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * entirely, as the beacon will be disabled in that case).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);

	/*
	 * Fill in skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order. The range may wrap
	 * around the end of the ring, hence the two-loop else branch.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
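
/*
 * The Q_INDEX/Q_INDEX_DONE pair implements a classic ring buffer:
 * Q_INDEX points at the next entry handed to the hardware and
 * Q_INDEX_DONE at the next entry awaiting completion, with
 * queue->length counting the entries in flight. For example, with
 * limit == 4, writing three frames and completing one leaves
 * Q_INDEX == 3, Q_INDEX_DONE == 1 and length == 2; a fourth write
 * then wraps Q_INDEX back to 0.
 */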

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in software and not in hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue.
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * of the queue to make sure they have been processed.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n",
			queue->qid);

	/*
	 * Restore the queue to the previous status.
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will stop each queue individually,
	 * but stop all mac80211 queues first, so no new frames can
	 * be handed to us while the queues are being torn down
	 * one by one.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
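
/*
 * The single kcalloc() above sizes each element as
 * sizeof(struct queue_entry) + priv_size, so the one buffer is large
 * enough to be carved up as an array of entries followed by an array
 * of per-entry driver private areas:
 *
 *   | entry 0 | ... | entry N-1 | priv 0 | ... | priv N-1 |
 *
 * QUEUE_ENTRY_PRIV_OFFSET() computes the address of priv i by skipping
 * the N fixed-size entries plus i private areas from the base pointer.
 */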

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00queue_free_skb(&queue->entries[i]);
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers.
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}
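
/*
 * Resulting layout of the queue array allocated above, given
 * tx_queues == N (indices into the single kcalloc'ed block):
 *
 *   [0] RX | [1]..[N] TX (QID_AC_VO..) | [N+1] BCN | [N+2] ATIM (optional)
 *
 * which is why rt2x00queue_free() below only needs to kfree() the
 * rt2x00dev->rx base pointer.
 */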

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}