1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include <linux/log2.h>
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62u32
63ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
64{
65 u32 pending;
66 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
67
68
69 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
70 return false;
71
72
73 if (ah->ah_version == AR5K_AR5210)
74 return false;
75
76 pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
77 pending &= AR5K_QCU_STS_FRMPENDCNT;
78
79
80
81
82 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
83 return true;
84
85 return pending;
86}
87
88
89
90
91
92
93void
94ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
95{
96 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
97 return;
98
99
100 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
101
102 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
103}
104
105
106
107
108
109
110
111static u16
112ath5k_cw_validate(u16 cw_req)
113{
114 cw_req = min(cw_req, (u16)1023);
115
116
117 if (is_power_of_2(cw_req + 1))
118 return cw_req;
119
120
121 if (is_power_of_2(cw_req))
122 return cw_req - 1;
123
124
125
126 cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
127
128 return cw_req;
129}
130
131
132
133
134
135
136
137int
138ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
139 struct ath5k_txq_info *queue_info)
140{
141 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
142 return 0;
143}
144
145
146
147
148
149
150
151
152
153int
154ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
155 const struct ath5k_txq_info *qinfo)
156{
157 struct ath5k_txq_info *qi;
158
159 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
160
161 qi = &ah->ah_txq[queue];
162
163 if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
164 return -EIO;
165
166
167 qi->tqi_type = qinfo->tqi_type;
168 qi->tqi_subtype = qinfo->tqi_subtype;
169 qi->tqi_flags = qinfo->tqi_flags;
170
171
172
173
174
175 qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
176 qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
177 qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
178 qi->tqi_cbr_period = qinfo->tqi_cbr_period;
179 qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
180 qi->tqi_burst_time = qinfo->tqi_burst_time;
181 qi->tqi_ready_time = qinfo->tqi_ready_time;
182
183
184
185 if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
186 ((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
187 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
188 qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
189 qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
190
191 return 0;
192}
193
194
195
196
197
198
199
200
201
202int
203ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
204 struct ath5k_txq_info *queue_info)
205{
206 unsigned int queue;
207 int ret;
208
209
210
211
212
213 if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
214 switch (queue_type) {
215 case AR5K_TX_QUEUE_DATA:
216 queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
217 break;
218 case AR5K_TX_QUEUE_BEACON:
219 case AR5K_TX_QUEUE_CAB:
220 queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
221 break;
222 default:
223 return -EINVAL;
224 }
225 } else {
226 switch (queue_type) {
227 case AR5K_TX_QUEUE_DATA:
228 for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
229 ah->ah_txq[queue].tqi_type !=
230 AR5K_TX_QUEUE_INACTIVE; queue++) {
231
232 if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
233 return -EINVAL;
234 }
235 break;
236 case AR5K_TX_QUEUE_UAPSD:
237 queue = AR5K_TX_QUEUE_ID_UAPSD;
238 break;
239 case AR5K_TX_QUEUE_BEACON:
240 queue = AR5K_TX_QUEUE_ID_BEACON;
241 break;
242 case AR5K_TX_QUEUE_CAB:
243 queue = AR5K_TX_QUEUE_ID_CAB;
244 break;
245 default:
246 return -EINVAL;
247 }
248 }
249
250
251
252
253 memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
254 ah->ah_txq[queue].tqi_type = queue_type;
255
256 if (queue_info != NULL) {
257 queue_info->tqi_type = queue_type;
258 ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
259 if (ret)
260 return ret;
261 }
262
263
264
265
266
267
268 AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
269
270 return queue;
271}
272
273
274
275
276
277
278
279
280
281
282
283
284
285
/**
 * ath5k_hw_set_tx_retry_limits() - Set transmit retry limits for a queue
 * @ah: The &struct ath5k_hw
 * @queue: One of enum ath5k_tx_queue_id
 *
 * Programs the hardware retry limits for @queue from ah_retry_short /
 * ah_retry_long. On AR5210 (no QCU/DCU) there is a single global
 * retry-limit register that only applies to queue 0.
 */
void
ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
				unsigned int queue)
{
	/* AR5210: one global register; also carries CWmin. */
	if (ah->ah_version == AR5K_AR5210) {
		struct ath5k_txq_info *tq = &ah->ah_txq[queue];

		/* Only queue 0 exists on this MAC. */
		if (queue > 0)
			return;

		/* Compose CWmin plus station and global short/long retry
		 * counts into the NODCU retry-limit register. */
		ath5k_hw_reg_write(ah,
			(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
			| AR5K_REG_SM(ah->ah_retry_long,
				AR5K_NODCU_RETRY_LMT_SLG_RETRY)
			| AR5K_REG_SM(ah->ah_retry_short,
				AR5K_NODCU_RETRY_LMT_SSH_RETRY)
			| AR5K_REG_SM(ah->ah_retry_long,
				AR5K_NODCU_RETRY_LMT_LG_RETRY)
			| AR5K_REG_SM(ah->ah_retry_short,
				AR5K_NODCU_RETRY_LMT_SH_RETRY),
			AR5K_NODCU_RETRY_LMT);

	} else {
		/* QCU/DCU MACs: per-queue DCU retry-limit register.
		 * STA_DATA uses the larger of the two limits. */
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(ah->ah_retry_long,
				AR5K_DCU_RETRY_LMT_RTS)
			| AR5K_REG_SM(ah->ah_retry_long,
				AR5K_DCU_RETRY_LMT_STA_RTS)
			| AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
				AR5K_DCU_RETRY_LMT_STA_DATA),
			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
	}
}
320
321
322
323
324
325
326
327
328
329int
330ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
331{
332 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
333
334 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
335
336 tq = &ah->ah_txq[queue];
337
338
339
340 if ((ah->ah_version == AR5K_AR5210) ||
341 (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
342 return 0;
343
344
345
346
347
348 ath5k_hw_reg_write(ah,
349 AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
350 AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
351 AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
352 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
353
354
355
356
357 ath5k_hw_set_tx_retry_limits(ah, queue);
358
359
360
361
362
363
364
365 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
366 AR5K_DCU_MISC_FRAG_WAIT);
367
368
369 if (ah->ah_mac_version < AR5K_SREV_AR5211)
370 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
371 AR5K_DCU_MISC_SEQNUM_CTL);
372
373
374 if (tq->tqi_cbr_period) {
375 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
376 AR5K_QCU_CBRCFG_INTVAL) |
377 AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
378 AR5K_QCU_CBRCFG_ORN_THRES),
379 AR5K_QUEUE_CBRCFG(queue));
380
381 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
382 AR5K_QCU_MISC_FRSHED_CBR);
383
384 if (tq->tqi_cbr_overflow_limit)
385 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
386 AR5K_QCU_MISC_CBR_THRES_ENABLE);
387 }
388
389
390 if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
391 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
392 AR5K_QCU_RDYTIMECFG_INTVAL) |
393 AR5K_QCU_RDYTIMECFG_ENABLE,
394 AR5K_QUEUE_RDYTIMECFG(queue));
395
396 if (tq->tqi_burst_time) {
397 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
398 AR5K_DCU_CHAN_TIME_DUR) |
399 AR5K_DCU_CHAN_TIME_ENABLE,
400 AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
401
402 if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
403 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
404 AR5K_QCU_MISC_RDY_VEOL_POLICY);
405 }
406
407
408 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
409 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
410 AR5K_QUEUE_DFS_MISC(queue));
411
412
413 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
414 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
415 AR5K_QUEUE_DFS_MISC(queue));
416
417
418
419
420 switch (tq->tqi_type) {
421 case AR5K_TX_QUEUE_BEACON:
422 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
423 AR5K_QCU_MISC_FRSHED_DBA_GT |
424 AR5K_QCU_MISC_CBREXP_BCN_DIS |
425 AR5K_QCU_MISC_BCN_ENABLE);
426
427 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
428 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
429 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
430 AR5K_DCU_MISC_ARBLOCK_IGNORE |
431 AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
432 AR5K_DCU_MISC_BCN_ENABLE);
433 break;
434
435 case AR5K_TX_QUEUE_CAB:
436
437 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
438 AR5K_QCU_MISC_FRSHED_DBA_GT |
439 AR5K_QCU_MISC_CBREXP_DIS |
440 AR5K_QCU_MISC_CBREXP_BCN_DIS);
441
442 ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
443 (AR5K_TUNE_SW_BEACON_RESP -
444 AR5K_TUNE_DMA_BEACON_RESP) -
445 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
446 AR5K_QCU_RDYTIMECFG_ENABLE,
447 AR5K_QUEUE_RDYTIMECFG(queue));
448
449 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
450 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
451 AR5K_DCU_MISC_ARBLOCK_CTL_S));
452 break;
453
454 case AR5K_TX_QUEUE_UAPSD:
455 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
456 AR5K_QCU_MISC_CBREXP_DIS);
457 break;
458
459 case AR5K_TX_QUEUE_DATA:
460 default:
461 break;
462 }
463
464
465
466
467
468
469
470 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
471 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
472
473 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
474 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
475
476 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
477 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
478
479 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
480 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
481
482 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
483 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
484
485 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
486 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
487
488 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
489 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
490
491 if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
492 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
493
494 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
495 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
496
497
498
499
500 ah->ah_txq_imr_txok &= ah->ah_txq_status;
501 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
502 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
503 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
504 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
505 ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
506 ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
507 ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
508 ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
509
510 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
511 AR5K_SIMR0_QCU_TXOK) |
512 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
513 AR5K_SIMR0_QCU_TXDESC),
514 AR5K_SIMR0);
515
516 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
517 AR5K_SIMR1_QCU_TXERR) |
518 AR5K_REG_SM(ah->ah_txq_imr_txeol,
519 AR5K_SIMR1_QCU_TXEOL),
520 AR5K_SIMR1);
521
522
523 AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
524 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
525 AR5K_REG_SM(ah->ah_txq_imr_txurn,
526 AR5K_SIMR2_QCU_TXURN));
527
528 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
529 AR5K_SIMR3_QCBRORN) |
530 AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
531 AR5K_SIMR3_QCBRURN),
532 AR5K_SIMR3);
533
534 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
535 AR5K_SIMR4_QTRIG), AR5K_SIMR4);
536
537
538 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
539 AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
540
541
542
543 if (ah->ah_txq_imr_nofrm == 0)
544 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
545
546
547 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
548
549 return 0;
550}
551
552
553
554
555
556
557
558
559
560
561
562
563
564
/**
 * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on hardware
 * @ah: The &struct ath5k_hw
 * @slot_time: Slot time in us
 *
 * Programs SIFS, EIFS (and on AR5210 also PIFS/DIFS) in core-clock
 * units, derived from @slot_time and the ACK duration at the lowest
 * usable rate of the current band/bandwidth mode.
 *
 * Returns 0 on success or -EINVAL if @slot_time is out of range or no
 * suitable rate is found.
 */
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
	struct ieee80211_channel *channel = ah->ah_current_channel;
	enum ieee80211_band band;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
	u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
	u32 rate_flags, i;

	/* Reject slot times below 6us or beyond the register field.
	 * NOTE(review): 6us lower bound looks like a hw limit - confirm. */
	if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
		return -EINVAL;

	sifs = ath5k_hw_get_default_sifs(ah);
	/* SIFS register holds (SIFS - 2us) in clock units - presumably
	 * the hardware adds a fixed 2us of overhead; TODO confirm. */
	sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);

	/*
	 * EIFS needs the ACK transmit time at the band's lowest usable
	 * rate. Resolve the band first, then filter rates by the flags
	 * required for the active bandwidth mode (5/10 MHz).
	 */
	if (channel->band == IEEE80211_BAND_5GHZ)
		band = IEEE80211_BAND_5GHZ;
	else
		band = IEEE80211_BAND_2GHZ;

	switch (ah->ah_bwmode) {
	case AR5K_BWMODE_5MHZ:
		rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
		break;
	case AR5K_BWMODE_10MHZ:
		rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
		break;
	default:
		rate_flags = 0;
		break;
	}
	sband = &ah->sbands[band];
	rate = NULL;
	/* Pick the first bitrate that supports the required flags
	 * (bitrates are assumed lowest-first - TODO confirm ordering). */
	for (i = 0; i < sband->n_bitrates; i++) {
		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
			continue;
		rate = &sband->bitrates[i];
		break;
	}
	if (WARN_ON(!rate))
		return -EINVAL;

	/* 10 bytes: duration of an ACK frame at the chosen rate. */
	ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);

	/* EIFS = ACK time + SIFS + 2 * slot time. */
	eifs = ack_tx_time + sifs + 2 * slot_time;
	eifs_clock = ath5k_hw_htoclock(ah, eifs);

	/* AR5210 (no QCU/DCU): program the IFS0/IFS1 registers. */
	if (ah->ah_version == AR5K_AR5210) {
		u32 pifs, pifs_clock, difs, difs_clock;

		/* Set slot time. */
		ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);

		/* Shift EIFS into its IFS1 field. */
		eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);

		/* PIFS = SIFS + one slot. */
		pifs = slot_time + sifs;
		pifs_clock = ath5k_hw_htoclock(ah, pifs);
		pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);

		/* DIFS = SIFS + two slots. */
		difs = sifs + 2 * slot_time;
		difs_clock = ath5k_hw_htoclock(ah, difs);

		/* IFS0 carries DIFS and SIFS. */
		ath5k_hw_reg_write(ah, (difs_clock <<
			AR5K_IFS0_DIFS_S) | sifs_clock,
			AR5K_IFS0);

		/* IFS1 carries PIFS, EIFS and the carrier-sense enable. */
		ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
			(AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
			AR5K_IFS1);

		return 0;
	}

	/* QCU/DCU MACs: dedicated global IFS registers. */
	ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);

	/* Set EIFS. */
	ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);

	/* SIFS in microseconds on the misc register... */
	AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
				AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
				sifs);

	/* ...and in clock units on the SIFS register. */
	ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);

	return 0;
}
686
687
688
689
690
691
692
693
694
695int
696ath5k_hw_init_queues(struct ath5k_hw *ah)
697{
698 int i, ret;
699
700
701
702
703
704
705
706
707
708 if (ah->ah_version != AR5K_AR5210)
709 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
710 ret = ath5k_hw_reset_tx_queue(ah, i);
711 if (ret) {
712 ATH5K_ERR(ah,
713 "failed to reset TX queue #%d\n", i);
714 return ret;
715 }
716 }
717 else
718
719
720
721 ath5k_hw_set_tx_retry_limits(ah, 0);
722
723
724 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
725 AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
726 AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
727
728
729
730
731 if (!ah->ah_coverage_class) {
732 unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
733 ath5k_hw_set_ifs_intervals(ah, slot_time);
734 }
735
736 return 0;
737}
738