#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * The newer ADD_STA command layout is a superset of the older v7 layout, so
 * sending only the size the running firmware understands keeps both API
 * versions working.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* station ID 0 is assumed to belong to the station interface's AP */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* no need for rcu_read_lock(): the table is protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * The station may already be gone if a failed firmware restart raced
	 * with this timer; there is nothing to tear down in that case.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
296
297
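/*
 * Tell the firmware to stop aggregation on the given TIDs for the station
 * that owns this queue and, optionally, remove the queue from the station's
 * TFD queue mask.  Only used on devices without the new (TVQM) TX API.
 */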
298static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
299 unsigned long disable_agg_tids,
300 bool remove_queue)
301{
302 struct iwl_mvm_add_sta_cmd cmd = {};
303 struct ieee80211_sta *sta;
304 struct iwl_mvm_sta *mvmsta;
305 u32 status;
306 u8 sta_id;
307
308 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
309 return -EINVAL;
310
311 sta_id = mvm->queue_info[queue].ra_sta_id;
312
313 rcu_read_lock();
314
315 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
316
317 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
318 rcu_read_unlock();
319 return -EINVAL;
320 }
321
322 mvmsta = iwl_mvm_sta_from_mac80211(sta);
323
324 mvmsta->tid_disable_agg |= disable_agg_tids;
325
326 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
327 cmd.sta_id = mvmsta->sta_id;
328 cmd.add_modify = STA_MODE_MODIFY;
329 cmd.modify_mask = STA_MODIFY_QUEUES;
330 if (disable_agg_tids)
331 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
332 if (remove_queue)
333 cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
334 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
335 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
336
337 rcu_read_unlock();
338
339
340 status = ADD_STA_SUCCESS;
341 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
342 iwl_mvm_add_sta_cmd_size(mvm),
343 &cmd, &status);
344}
345
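/*
 * Disable a TX queue for the given station/TID.  On new (TVQM) hardware the
 * queue is simply freed back to the transport; otherwise the TID is removed
 * from the queue's bitmap and the queue is only torn down once no TIDs are
 * left on it.
 */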
346static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
347 int queue, u8 tid, u8 flags)
348{
349 struct iwl_scd_txq_cfg_cmd cmd = {
350 .scd_queue = queue,
351 .action = SCD_CFG_DISABLE_QUEUE,
352 };
353 int ret;
354
355 if (iwl_mvm_has_new_tx_api(mvm)) {
356 iwl_trans_txq_free(mvm->trans, queue);
357
358 return 0;
359 }
360
361 if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
362 return 0;
363
364 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
365
366 cmd.action = mvm->queue_info[queue].tid_bitmap ?
367 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
368 if (cmd.action == SCD_CFG_DISABLE_QUEUE)
369 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
370
371 IWL_DEBUG_TX_QUEUES(mvm,
372 "Disabling TXQ #%d tids=0x%x\n",
373 queue,
374 mvm->queue_info[queue].tid_bitmap);
375
376
377 if (cmd.action == SCD_CFG_ENABLE_QUEUE)
378 return 0;
379
380 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
381 cmd.tid = mvm->queue_info[queue].txq_tid;
382
383
384 WARN(mvm->queue_info[queue].tid_bitmap,
385 "TXQ #%d info out-of-sync - tids=0x%x\n",
386 queue, mvm->queue_info[queue].tid_bitmap);
387
388
389 mvm->queue_info[queue].tid_bitmap = 0;
390
391 if (sta) {
392 struct iwl_mvm_txq *mvmtxq =
393 iwl_mvm_txq_from_tid(sta, tid);
394
395 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
396 }
397
398
399 mvm->queue_info[queue].reserved = false;
400
401 iwl_trans_txq_disable(mvm->trans, queue, false);
402 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
403 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
404
405 if (ret)
406 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
407 queue, ret);
408 return ret;
409}
410
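/*
 * Return a bitmap of the TIDs on this queue that currently have an active
 * aggregation session.
 */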
411static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
412{
413 struct ieee80211_sta *sta;
414 struct iwl_mvm_sta *mvmsta;
415 unsigned long tid_bitmap;
416 unsigned long agg_tids = 0;
417 u8 sta_id;
418 int tid;
419
420 lockdep_assert_held(&mvm->mutex);
421
422 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
423 return -EINVAL;
424
425 sta_id = mvm->queue_info[queue].ra_sta_id;
426 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
427
428 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
429 lockdep_is_held(&mvm->mutex));
430
431 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
432 return -EINVAL;
433
434 mvmsta = iwl_mvm_sta_from_mac80211(sta);
435
436 spin_lock_bh(&mvmsta->lock);
437 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
438 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
439 agg_tids |= BIT(tid);
440 }
441 spin_unlock_bh(&mvmsta->lock);
442
443 return agg_tids;
444}
445
446
447
448
449
450
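/*
 * Detach this queue from the station that owns it: clear the per-TID queue
 * assignments and the TFD queue mask in the driver only (the firmware is not
 * told here).  Returns a bitmap of TIDs that had aggregation enabled so the
 * caller can disable them.
 */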
451static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
452{
453 struct ieee80211_sta *sta;
454 struct iwl_mvm_sta *mvmsta;
455 unsigned long tid_bitmap;
456 unsigned long disable_agg_tids = 0;
457 u8 sta_id;
458 int tid;
459
460 lockdep_assert_held(&mvm->mutex);
461
462 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
463 return -EINVAL;
464
465 sta_id = mvm->queue_info[queue].ra_sta_id;
466 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
467
468 rcu_read_lock();
469
470 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
471
472 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
473 rcu_read_unlock();
474 return 0;
475 }
476
477 mvmsta = iwl_mvm_sta_from_mac80211(sta);
478
479 spin_lock_bh(&mvmsta->lock);
480
481 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
482 struct iwl_mvm_txq *mvmtxq =
483 iwl_mvm_txq_from_tid(sta, tid);
484
485 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
486 disable_agg_tids |= BIT(tid);
487 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
488
489 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
490 }
491
492 mvmsta->tfd_queue_msk &= ~BIT(queue);
493 spin_unlock_bh(&mvmsta->lock);
494
495 rcu_read_unlock();
496
497
498
499
500
501
502
503
504
505 synchronize_net();
506
507 return disable_agg_tids;
508}
509
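/*
 * Free a queue that has gone inactive: detach it from its current owner,
 * disable any aggregation that was running on it, and tear it down so it can
 * be reassigned (possibly to new_sta_id).
 */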
510static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
511 struct ieee80211_sta *old_sta,
512 u8 new_sta_id)
513{
514 struct iwl_mvm_sta *mvmsta;
515 u8 sta_id, tid;
516 unsigned long disable_agg_tids = 0;
517 bool same_sta;
518 int ret;
519
520 lockdep_assert_held(&mvm->mutex);
521
522 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
523 return -EINVAL;
524
525 sta_id = mvm->queue_info[queue].ra_sta_id;
526 tid = mvm->queue_info[queue].txq_tid;
527
528 same_sta = sta_id == new_sta_id;
529
530 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
531 if (WARN_ON(!mvmsta))
532 return -EINVAL;
533
534 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
535
536 if (disable_agg_tids)
537 iwl_mvm_invalidate_sta_queue(mvm, queue,
538 disable_agg_tids, false);
539
540 ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
541 if (ret) {
542 IWL_ERR(mvm,
543 "Failed to free inactive queue %d (ret=%d)\n",
544 queue, ret);
545
546 return ret;
547 }
548
549
550 if (!same_sta)
551 iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
552
553 return 0;
554}
555
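/*
 * No free queue was found, so pick one of the station's existing data queues
 * to share for the given AC.  Returns the queue number or -ENOSPC.
 */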
556static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
557 unsigned long tfd_queue_mask, u8 ac)
558{
559 int queue = 0;
560 u8 ac_to_queue[IEEE80211_NUM_ACS];
561 int i;
562
563
564
565
566
567 lockdep_assert_held(&mvm->mutex);
568
569 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
570 return -EINVAL;
571
572 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
573
574
575 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
576
577 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
578 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
579 continue;
580
581 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
582 }
583
584
585
586
587
588
589
590
591
592
593
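	/*
	 * Preference order when picking a queue to share: an existing BE
	 * queue, then a queue already serving the same AC, then (for VO
	 * traffic) a VI queue, and finally BK, VI or VO in that order.
	 */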
594 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
595 queue = ac_to_queue[IEEE80211_AC_BE];
596
597 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
598 queue = ac_to_queue[ac];
599
600 else if (ac == IEEE80211_AC_VO &&
601 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
602 queue = ac_to_queue[IEEE80211_AC_VI];
603
604 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
605 queue = ac_to_queue[IEEE80211_AC_BK];
606
607 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
608 queue = ac_to_queue[IEEE80211_AC_VI];
609
610 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
611 queue = ac_to_queue[IEEE80211_AC_VO];
612
613
614 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
615 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
616 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
617 IWL_ERR(mvm, "No DATA queues available to share\n");
618 return -ENOSPC;
619 }
620
621 return queue;
622}
623
624
625
626
627
628
629
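/*
 * Redirect a TX queue to the FIFO of a different AC: drain it, disable it in
 * the scheduler, then re-enable and reconfigure it for the new AC and update
 * the driver's queue bookkeeping.
 */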
630static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
631 int ac, int ssn, unsigned int wdg_timeout,
632 bool force, struct iwl_mvm_txq *txq)
633{
634 struct iwl_scd_txq_cfg_cmd cmd = {
635 .scd_queue = queue,
636 .action = SCD_CFG_DISABLE_QUEUE,
637 };
638 bool shared_queue;
639 int ret;
640
641 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
642 return -EINVAL;
643
644
645
646
647
648
649
650
651
652 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
653 IWL_DEBUG_TX_QUEUES(mvm,
654 "No redirection needed on TXQ #%d\n",
655 queue);
656 return 0;
657 }
658
659 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
660 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
661 cmd.tid = mvm->queue_info[queue].txq_tid;
662 shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
663
664 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
665 queue, iwl_mvm_ac_to_tx_fifo[ac]);
666
667
668 txq->stopped = true;
669
670 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
671 if (ret) {
672 IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
673 queue);
674 ret = -EIO;
675 goto out;
676 }
677
678
679 iwl_trans_txq_disable(mvm->trans, queue, false);
680 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
681 if (ret)
682 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
683 ret);
684
685
686 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
687
688
689 mvm->queue_info[queue].txq_tid = tid;
690
691
692
693
694 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
695 cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
696
697
698 mvm->queue_info[queue].mac80211_ac = ac;
699
700
701
702
703
704
705
706 if (shared_queue)
707 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
708
709out:
710
711 txq->stopped = false;
712
713 return ret;
714}
715
716static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
717 u8 minq, u8 maxq)
718{
719 int i;
720
721 lockdep_assert_held(&mvm->mutex);
722
723 if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
724 "max queue %d >= num_of_queues (%d)", maxq,
725 mvm->trans->trans_cfg->base_params->num_of_queues))
726 maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
727
728
729 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
730 return -ENOSPC;
731
732
733 for (i = minq; i <= maxq; i++)
734 if (mvm->queue_info[i].tid_bitmap == 0 &&
735 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
736 return i;
737
738 return -ENOSPC;
739}
740
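/*
 * Allocate and enable a TVQM queue for the given station/TID, halving the
 * requested queue size on failure until a minimum of 16 entries is reached.
 */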
741static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
742 u8 sta_id, u8 tid, unsigned int timeout)
743{
744 int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
745 mvm->trans->cfg->min_256_ba_txq_size);
746
747 if (tid == IWL_MAX_TID_COUNT) {
748 tid = IWL_MGMT_TID;
749 size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
750 mvm->trans->cfg->min_txq_size);
751 }
752
753 do {
754 __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
755
756 queue = iwl_trans_txq_alloc(mvm->trans, enable,
757 sta_id, tid, SCD_QUEUE_CFG,
758 size, timeout);
759
760 if (queue < 0)
761 IWL_DEBUG_TX_QUEUES(mvm,
762 "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
763 size, sta_id, tid, queue);
764 size /= 2;
765 } while (queue < 0 && size >= 16);
766
767 if (queue < 0)
768 return queue;
769
770 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
771 queue, sta_id, tid);
772
773 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
774
775 return queue;
776}
777
778static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
779 struct ieee80211_sta *sta, u8 ac,
780 int tid)
781{
782 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
783 struct iwl_mvm_txq *mvmtxq =
784 iwl_mvm_txq_from_tid(sta, tid);
785 unsigned int wdg_timeout =
786 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
787 int queue = -1;
788
789 lockdep_assert_held(&mvm->mutex);
790
791 IWL_DEBUG_TX_QUEUES(mvm,
792 "Allocating queue for sta %d on tid %d\n",
793 mvmsta->sta_id, tid);
794 queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
795 if (queue < 0)
796 return queue;
797
798 mvmtxq->txq_id = queue;
799 mvm->tvqm_info[queue].txq_tid = tid;
800 mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
801
802 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
803
804 spin_lock_bh(&mvmsta->lock);
805 mvmsta->tid_data[tid].txq_id = queue;
806 spin_unlock_bh(&mvmsta->lock);
807
808 return 0;
809}
810
811static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
812 struct ieee80211_sta *sta,
813 int queue, u8 sta_id, u8 tid)
814{
815 bool enable_queue = true;
816
817
818 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
819 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
820 queue, tid);
821 return false;
822 }
823
824
825 if (mvm->queue_info[queue].tid_bitmap)
826 enable_queue = false;
827
828 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
829 mvm->queue_info[queue].ra_sta_id = sta_id;
830
831 if (enable_queue) {
832 if (tid != IWL_MAX_TID_COUNT)
833 mvm->queue_info[queue].mac80211_ac =
834 tid_to_mac80211_ac[tid];
835 else
836 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
837
838 mvm->queue_info[queue].txq_tid = tid;
839 }
840
841 if (sta) {
842 struct iwl_mvm_txq *mvmtxq =
843 iwl_mvm_txq_from_tid(sta, tid);
844
845 mvmtxq->txq_id = queue;
846 }
847
848 IWL_DEBUG_TX_QUEUES(mvm,
849 "Enabling TXQ #%d tids=0x%x\n",
850 queue, mvm->queue_info[queue].tid_bitmap);
851
852 return enable_queue;
853}
854
855static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
856 int queue, u16 ssn,
857 const struct iwl_trans_txq_scd_cfg *cfg,
858 unsigned int wdg_timeout)
859{
860 struct iwl_scd_txq_cfg_cmd cmd = {
861 .scd_queue = queue,
862 .action = SCD_CFG_ENABLE_QUEUE,
863 .window = cfg->frame_limit,
864 .sta_id = cfg->sta_id,
865 .ssn = cpu_to_le16(ssn),
866 .tx_fifo = cfg->fifo,
867 .aggregate = cfg->aggregate,
868 .tid = cfg->tid,
869 };
870 bool inc_ssn;
871
872 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
873 return false;
874
875
876 if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
877 return false;
878
879 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
880 NULL, wdg_timeout);
881 if (inc_ssn)
882 le16_add_cpu(&cmd.ssn, 1);
883
884 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
885 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
886
887 return inc_ssn;
888}
889
890static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
891{
892 struct iwl_scd_txq_cfg_cmd cmd = {
893 .scd_queue = queue,
894 .action = SCD_CFG_UPDATE_QUEUE_TID,
895 };
896 int tid;
897 unsigned long tid_bitmap;
898 int ret;
899
900 lockdep_assert_held(&mvm->mutex);
901
902 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
903 return;
904
905 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
906
907 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
908 return;
909
910
911 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
912 cmd.tid = tid;
913 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
914
915 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
916 if (ret) {
917 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
918 queue, ret);
919 return;
920 }
921
922 mvm->queue_info[queue].txq_tid = tid;
923 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
924 queue, tid);
925}
926
927static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
928{
929 struct ieee80211_sta *sta;
930 struct iwl_mvm_sta *mvmsta;
931 u8 sta_id;
932 int tid = -1;
933 unsigned long tid_bitmap;
934 unsigned int wdg_timeout;
935 int ssn;
936 int ret = true;
937
938
939 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
940 return;
941
942 lockdep_assert_held(&mvm->mutex);
943
944 sta_id = mvm->queue_info[queue].ra_sta_id;
945 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
946
947
948 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
949 if (tid_bitmap != BIT(tid)) {
950 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
951 queue, tid_bitmap);
952 return;
953 }
954
955 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
956 tid);
957
958 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
959 lockdep_is_held(&mvm->mutex));
960
961 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
962 return;
963
964 mvmsta = iwl_mvm_sta_from_mac80211(sta);
965 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
966
967 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
968
969 ret = iwl_mvm_redirect_queue(mvm, queue, tid,
970 tid_to_mac80211_ac[tid], ssn,
971 wdg_timeout, true,
972 iwl_mvm_txq_from_tid(sta, tid));
973 if (ret) {
974 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
975 return;
976 }
977
978
979 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
980 struct iwl_mvm_add_sta_cmd cmd = {0};
981
982 mvmsta->tid_disable_agg &= ~BIT(tid);
983
984 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
985 cmd.sta_id = mvmsta->sta_id;
986 cmd.add_modify = STA_MODE_MODIFY;
987 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
988 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
989 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
990
991 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
992 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
993 if (!ret) {
994 IWL_DEBUG_TX_QUEUES(mvm,
995 "TXQ #%d is now aggregated again\n",
996 queue);
997
998
999 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1000 }
1001 }
1002
1003 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1004}
1005
1006
1007
1008
1009
1010
1011
1012
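/*
 * Remove TIDs that have gone inactive from the given queue.  Returns true if
 * every TID on the queue is now inactive (so the queue itself can be freed);
 * otherwise the inactive TIDs are detached and queues that need to be
 * unshared or re-owned are flagged in the given bitmaps.
 */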
1013static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1014 struct iwl_mvm_sta *mvmsta, int queue,
1015 unsigned long tid_bitmap,
1016 unsigned long *unshare_queues,
1017 unsigned long *changetid_queues)
1018{
1019 int tid;
1020
1021 lockdep_assert_held(&mvmsta->lock);
1022 lockdep_assert_held(&mvm->mutex);
1023
1024 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1025 return false;
1026
1027
1028 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1029
1030 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1031 tid_bitmap &= ~BIT(tid);
1032
1033
1034 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1035 tid_bitmap &= ~BIT(tid);
1036 }
1037
1038
1039 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1040 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1041 return true;
1042 }
1043
1044
1045
1046
1047
1048 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1049 u16 tid_bitmap;
1050
1051 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1052 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1053
1054 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067 if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1068 set_bit(queue, changetid_queues);
1069
1070 IWL_DEBUG_TX_QUEUES(mvm,
1071 "Removing inactive TID %d from shared Q:%d\n",
1072 tid, queue);
1073 }
1074
1075 IWL_DEBUG_TX_QUEUES(mvm,
1076 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1077 mvm->queue_info[queue].tid_bitmap);
1078
1079
1080
1081
1082
1083 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1084
1085
1086 if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1087 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1088 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1089 queue);
1090 set_bit(queue, unshare_queues);
1091 }
1092
1093 return false;
1094}
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
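/*
 * Check all data queues for inactivity.  Queues whose TIDs have been idle
 * for longer than IWL_MVM_DQA_QUEUE_TIMEOUT are unshared, re-owned or, if
 * fully idle, freed for reuse (when alloc_for_sta is a valid station).
 * Returns the number of a queue that can be reused, or -ENOSPC if none.
 */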
1105static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1106{
1107 unsigned long now = jiffies;
1108 unsigned long unshare_queues = 0;
1109 unsigned long changetid_queues = 0;
1110 int i, ret, free_queue = -ENOSPC;
1111 struct ieee80211_sta *queue_owner = NULL;
1112
1113 lockdep_assert_held(&mvm->mutex);
1114
1115 if (iwl_mvm_has_new_tx_api(mvm))
1116 return -ENOSPC;
1117
1118 rcu_read_lock();
1119
1120
1121 BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1122
1123 for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1124 struct ieee80211_sta *sta;
1125 struct iwl_mvm_sta *mvmsta;
1126 u8 sta_id;
1127 int tid;
1128 unsigned long inactive_tid_bitmap = 0;
1129 unsigned long queue_tid_bitmap;
1130
1131 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1132 if (!queue_tid_bitmap)
1133 continue;
1134
1135
1136 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1137 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1138 continue;
1139
1140
1141 for_each_set_bit(tid, &queue_tid_bitmap,
1142 IWL_MAX_TID_COUNT + 1) {
1143 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1144 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1145 continue;
1146
1147 inactive_tid_bitmap |= BIT(tid);
1148 }
1149
1150
1151 if (!inactive_tid_bitmap)
1152 continue;
1153
1154
1155
1156
1157
1158
1159 sta_id = mvm->queue_info[i].ra_sta_id;
1160 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1161
1162
1163
1164
1165
1166
1167 if (IS_ERR_OR_NULL(sta))
1168 continue;
1169
1170 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1171
1172 spin_lock_bh(&mvmsta->lock);
1173 ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1174 inactive_tid_bitmap,
1175 &unshare_queues,
1176 &changetid_queues);
1177 if (ret && free_queue < 0) {
1178 queue_owner = sta;
1179 free_queue = i;
1180 }
1181
1182 spin_unlock_bh(&mvmsta->lock);
1183 }
1184
1185
1186
1187 for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1188 iwl_mvm_unshare_queue(mvm, i);
1189 for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1190 iwl_mvm_change_queue_tid(mvm, i);
1191
1192 rcu_read_unlock();
1193
1194 if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1195 ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
1196 alloc_for_sta);
1197 if (ret)
1198 return ret;
1199 }
1200
1201 return free_queue;
1202}
1203
1204static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1205 struct ieee80211_sta *sta, u8 ac, int tid)
1206{
1207 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1208 struct iwl_trans_txq_scd_cfg cfg = {
1209 .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1210 .sta_id = mvmsta->sta_id,
1211 .tid = tid,
1212 .frame_limit = IWL_FRAME_LIMIT,
1213 };
1214 unsigned int wdg_timeout =
1215 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1216 int queue = -1;
1217 unsigned long disable_agg_tids = 0;
1218 enum iwl_mvm_agg_state queue_state;
1219 bool shared_queue = false, inc_ssn;
1220 int ssn;
1221 unsigned long tfd_queue_mask;
1222 int ret;
1223
1224 lockdep_assert_held(&mvm->mutex);
1225
1226 if (iwl_mvm_has_new_tx_api(mvm))
1227 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1228
1229 spin_lock_bh(&mvmsta->lock);
1230 tfd_queue_mask = mvmsta->tfd_queue_msk;
1231 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1232 spin_unlock_bh(&mvmsta->lock);
1233
1234 if (tid == IWL_MAX_TID_COUNT) {
1235 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1236 IWL_MVM_DQA_MIN_MGMT_QUEUE,
1237 IWL_MVM_DQA_MAX_MGMT_QUEUE);
1238 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1239 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1240 queue);
1241
1242
1243 }
1244
1245 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1246 (mvm->queue_info[mvmsta->reserved_queue].status ==
1247 IWL_MVM_QUEUE_RESERVED)) {
1248 queue = mvmsta->reserved_queue;
1249 mvm->queue_info[queue].reserved = true;
1250 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1251 }
1252
1253 if (queue < 0)
1254 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1255 IWL_MVM_DQA_MIN_DATA_QUEUE,
1256 IWL_MVM_DQA_MAX_DATA_QUEUE);
1257 if (queue < 0) {
1258
1259 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1260 }
1261
1262
1263 if (queue <= 0) {
1264 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1265 if (queue > 0) {
1266 shared_queue = true;
1267 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1268 }
1269 }
1270
1271
1272
1273
1274
1275
1276
1277 if (queue > 0 && !shared_queue)
1278 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1279
1280
1281 if (WARN_ON(queue <= 0)) {
1282 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1283 tid, cfg.sta_id);
1284 return queue;
1285 }
1286
1287
1288
1289
1290
1291
1292
1293 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1294 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1295
1296 IWL_DEBUG_TX_QUEUES(mvm,
1297 "Allocating %squeue #%d to sta %d on tid %d\n",
1298 shared_queue ? "shared " : "", queue,
1299 mvmsta->sta_id, tid);
1300
1301 if (shared_queue) {
1302
1303 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1304
1305 if (disable_agg_tids) {
1306 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1307 queue);
1308 iwl_mvm_invalidate_sta_queue(mvm, queue,
1309 disable_agg_tids, false);
1310 }
1311 }
1312
1313 inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
1314
1315
1316
1317
1318
1319
1320
1321 if (shared_queue)
1322 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1323
1324 spin_lock_bh(&mvmsta->lock);
1325
1326
1327
1328
1329
1330 if (inc_ssn) {
1331 mvmsta->tid_data[tid].seq_number += 0x10;
1332 ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1333 }
1334 mvmsta->tid_data[tid].txq_id = queue;
1335 mvmsta->tfd_queue_msk |= BIT(queue);
1336 queue_state = mvmsta->tid_data[tid].state;
1337
1338 if (mvmsta->reserved_queue == queue)
1339 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1340 spin_unlock_bh(&mvmsta->lock);
1341
1342 if (!shared_queue) {
1343 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1344 if (ret)
1345 goto out_err;
1346
1347
1348 if (queue_state == IWL_AGG_ON) {
1349 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1350 if (ret)
1351 goto out_err;
1352 }
1353 } else {
1354
1355 ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
1356 wdg_timeout, false,
1357 iwl_mvm_txq_from_tid(sta, tid));
1358 if (ret)
1359 goto out_err;
1360 }
1361
1362 return 0;
1363
1364out_err:
1365 iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
1366
1367 return ret;
1368}
1369
1370static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1371{
1372 if (tid == IWL_MAX_TID_COUNT)
1373 return IEEE80211_AC_VO;
1374
1375 return tid_to_mac80211_ac[tid];
1376}
1377
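/*
 * Worker that allocates hardware queues for mac80211 TXQs that could not get
 * one at TX time, then restarts transmission on them.
 */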
1378void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1379{
1380 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1381 add_stream_wk);
1382
1383 mutex_lock(&mvm->mutex);
1384
1385 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1386
1387 while (!list_empty(&mvm->add_stream_txqs)) {
1388 struct iwl_mvm_txq *mvmtxq;
1389 struct ieee80211_txq *txq;
1390 u8 tid;
1391
1392 mvmtxq = list_first_entry(&mvm->add_stream_txqs,
1393 struct iwl_mvm_txq, list);
1394
1395 txq = container_of((void *)mvmtxq, struct ieee80211_txq,
1396 drv_priv);
1397 tid = txq->tid;
1398 if (tid == IEEE80211_NUM_TIDS)
1399 tid = IWL_MAX_TID_COUNT;
1400
1401
1402
1403
1404
1405
1406
1407 if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
1408 list_del_init(&mvmtxq->list);
1409 continue;
1410 }
1411
1412 list_del_init(&mvmtxq->list);
1413 local_bh_disable();
1414 iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1415 local_bh_enable();
1416 }
1417
1418 mutex_unlock(&mvm->mutex);
1419}
1420
1421static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1422 struct ieee80211_sta *sta,
1423 enum nl80211_iftype vif_type)
1424{
1425 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1426 int queue;
1427
1428
1429 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1430 return 0;
1431
1432
1433 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1434
1435
1436 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1437 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1438 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1439 IWL_MVM_QUEUE_FREE))
1440 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1441 else
1442 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1443 IWL_MVM_DQA_MIN_DATA_QUEUE,
1444 IWL_MVM_DQA_MAX_DATA_QUEUE);
1445 if (queue < 0) {
1446
1447 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1448 if (queue < 0) {
1449 IWL_ERR(mvm, "No available queues for new station\n");
1450 return -ENOSPC;
1451 }
1452 }
1453 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1454
1455 mvmsta->reserved_queue = queue;
1456
1457 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1458 queue, mvmsta->sta_id);
1459
1460 return 0;
1461}
1462
1463
1464
1465
1466
1467
1468
1469
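/*
 * After a firmware/hardware restart, re-enable all the TX queues the station
 * was using.  On TVQM (new TX API) devices the queues are allocated anew
 * instead of being re-mapped to their old numbers.
 */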
1470static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1471 struct ieee80211_sta *sta)
1472{
1473 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1474 unsigned int wdg =
1475 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1476 int i;
1477 struct iwl_trans_txq_scd_cfg cfg = {
1478 .sta_id = mvm_sta->sta_id,
1479 .frame_limit = IWL_FRAME_LIMIT,
1480 };
1481
1482
1483 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1484 mvm->queue_info[mvm_sta->reserved_queue].status =
1485 IWL_MVM_QUEUE_RESERVED;
1486
1487 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1488 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1489 int txq_id = tid_data->txq_id;
1490 int ac;
1491
1492 if (txq_id == IWL_MVM_INVALID_QUEUE)
1493 continue;
1494
1495 ac = tid_to_mac80211_ac[i];
1496
1497 if (iwl_mvm_has_new_tx_api(mvm)) {
1498 IWL_DEBUG_TX_QUEUES(mvm,
1499 "Re-mapping sta %d tid %d\n",
1500 mvm_sta->sta_id, i);
1501 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
1502 i, wdg);
1503
1504
1505
1506
1507
1508 if (txq_id < 0)
1509 txq_id = IWL_MVM_INVALID_QUEUE;
1510 tid_data->txq_id = txq_id;
1511
1512
1513
1514
1515
1516
1517
1518 tid_data->seq_number = 0;
1519 } else {
1520 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1521
1522 cfg.tid = i;
1523 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1524 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1525 txq_id ==
1526 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1527
1528 IWL_DEBUG_TX_QUEUES(mvm,
1529 "Re-mapping sta %d tid %d to queue %d\n",
1530 mvm_sta->sta_id, i, txq_id);
1531
1532 iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
1533 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1534 }
1535 }
1536}
1537
1538static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1539 struct iwl_mvm_int_sta *sta,
1540 const u8 *addr,
1541 u16 mac_id, u16 color)
1542{
1543 struct iwl_mvm_add_sta_cmd cmd;
1544 int ret;
1545 u32 status = ADD_STA_SUCCESS;
1546
1547 lockdep_assert_held(&mvm->mutex);
1548
1549 memset(&cmd, 0, sizeof(cmd));
1550 cmd.sta_id = sta->sta_id;
1551 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1552 color));
1553 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1554 cmd.station_type = sta->type;
1555
1556 if (!iwl_mvm_has_new_tx_api(mvm))
1557 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1558 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1559
1560 if (addr)
1561 memcpy(cmd.addr, addr, ETH_ALEN);
1562
1563 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1564 iwl_mvm_add_sta_cmd_size(mvm),
1565 &cmd, &status);
1566 if (ret)
1567 return ret;
1568
1569 switch (status & IWL_ADD_STA_STATUS_MASK) {
1570 case ADD_STA_SUCCESS:
1571 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1572 return 0;
1573 default:
1574 ret = -EIO;
1575 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1576 status);
1577 break;
1578 }
1579 return ret;
1580}
1581
1582int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1583 struct ieee80211_vif *vif,
1584 struct ieee80211_sta *sta)
1585{
1586 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1587 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1588 struct iwl_mvm_rxq_dup_data *dup_data;
1589 int i, ret, sta_id;
1590 bool sta_update = false;
1591 unsigned int sta_flags = 0;
1592
1593 lockdep_assert_held(&mvm->mutex);
1594
1595 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1596 sta_id = iwl_mvm_find_free_sta_id(mvm,
1597 ieee80211_vif_type_p2p(vif));
1598 else
1599 sta_id = mvm_sta->sta_id;
1600
1601 if (sta_id == IWL_MVM_INVALID_STA)
1602 return -ENOSPC;
1603
1604 spin_lock_init(&mvm_sta->lock);
1605
1606
1607 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1608 struct iwl_mvm_int_sta tmp_sta = {
1609 .sta_id = sta_id,
1610 .type = mvm_sta->sta_type,
1611 };
1612
1613
1614
1615
1616
1617 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1618 mvmvif->id, mvmvif->color);
1619 if (ret)
1620 goto err;
1621
1622 iwl_mvm_realloc_queues_after_restart(mvm, sta);
1623 sta_update = true;
1624 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1625 goto update_fw;
1626 }
1627
1628 mvm_sta->sta_id = sta_id;
1629 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1630 mvmvif->color);
1631 mvm_sta->vif = vif;
1632 if (!mvm->trans->trans_cfg->gen2)
1633 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1634 else
1635 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1636 mvm_sta->tx_protection = 0;
1637 mvm_sta->tt_tx_protection = false;
1638 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1639
1640
1641 mvm_sta->tid_disable_agg = 0xffff;
1642 mvm_sta->tfd_queue_msk = 0;
1643
1644
1645 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1646 u16 seq = mvm_sta->tid_data[i].seq_number;
1647 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1648 mvm_sta->tid_data[i].seq_number = seq;
1649
1650
1651
1652
1653
1654 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1655 }
1656
1657 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1658 struct iwl_mvm_txq *mvmtxq =
1659 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1660
1661 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1662 INIT_LIST_HEAD(&mvmtxq->list);
1663 atomic_set(&mvmtxq->tx_request, 0);
1664 }
1665
1666 mvm_sta->agg_tids = 0;
1667
1668 if (iwl_mvm_has_new_rx_api(mvm) &&
1669 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1670 int q;
1671
1672 dup_data = kcalloc(mvm->trans->num_rx_queues,
1673 sizeof(*dup_data), GFP_KERNEL);
1674 if (!dup_data)
1675 return -ENOMEM;
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1686 memset(dup_data[q].last_seq, 0xff,
1687 sizeof(dup_data[q].last_seq));
1688 mvm_sta->dup_data = dup_data;
1689 }
1690
1691 if (!iwl_mvm_has_new_tx_api(mvm)) {
1692 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1693 ieee80211_vif_type_p2p(vif));
1694 if (ret)
1695 goto err;
1696 }
1697
1698
1699
1700
1701
1702 if (iwl_mvm_has_tlc_offload(mvm))
1703 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1704 else
1705 spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
1706
1707 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1708
1709update_fw:
1710 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1711 if (ret)
1712 goto err;
1713
1714 if (vif->type == NL80211_IFTYPE_STATION) {
1715 if (!sta->tdls) {
1716 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1717 mvmvif->ap_sta_id = sta_id;
1718 } else {
1719 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1720 }
1721 }
1722
1723 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1724
1725 return 0;
1726
1727err:
1728 return ret;
1729}
1730
1731int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1732 bool drain)
1733{
1734 struct iwl_mvm_add_sta_cmd cmd = {};
1735 int ret;
1736 u32 status;
1737
1738 lockdep_assert_held(&mvm->mutex);
1739
1740 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1741 cmd.sta_id = mvmsta->sta_id;
1742 cmd.add_modify = STA_MODE_MODIFY;
1743 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1744 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1745
1746 status = ADD_STA_SUCCESS;
1747 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1748 iwl_mvm_add_sta_cmd_size(mvm),
1749 &cmd, &status);
1750 if (ret)
1751 return ret;
1752
1753 switch (status & IWL_ADD_STA_STATUS_MASK) {
1754 case ADD_STA_SUCCESS:
1755 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
1756 mvmsta->sta_id);
1757 break;
1758 default:
1759 ret = -EIO;
1760 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1761 mvmsta->sta_id);
1762 break;
1763 }
1764
1765 return ret;
1766}
1767
1768
1769
1770
1771
1772
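/*
 * Remove a station from the firmware's station table.  The caller is expected
 * to have already cleaned up the station's queues; this only sanity-checks
 * that the driver still knows the station before sending REMOVE_STA.
 */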
1773static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1774{
1775 struct ieee80211_sta *sta;
1776 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1777 .sta_id = sta_id,
1778 };
1779 int ret;
1780
1781 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1782 lockdep_is_held(&mvm->mutex));
1783
1784
1785 if (!sta) {
1786 IWL_ERR(mvm, "Invalid station id\n");
1787 return -EINVAL;
1788 }
1789
1790 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1791 sizeof(rm_sta_cmd), &rm_sta_cmd);
1792 if (ret) {
1793 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1794 return ret;
1795 }
1796
1797 return 0;
1798}
1799
1800static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1801 struct ieee80211_vif *vif,
1802 struct ieee80211_sta *sta)
1803{
1804 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1805 int i;
1806
1807 lockdep_assert_held(&mvm->mutex);
1808
1809 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1810 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1811 continue;
1812
1813 iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
1814 0);
1815 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1816 }
1817
1818 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1819 struct iwl_mvm_txq *mvmtxq =
1820 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1821
1822 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1823 }
1824}
1825
1826int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1827 struct iwl_mvm_sta *mvm_sta)
1828{
1829 int i;
1830
1831 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1832 u16 txq_id;
1833 int ret;
1834
1835 spin_lock_bh(&mvm_sta->lock);
1836 txq_id = mvm_sta->tid_data[i].txq_id;
1837 spin_unlock_bh(&mvm_sta->lock);
1838
1839 if (txq_id == IWL_MVM_INVALID_QUEUE)
1840 continue;
1841
1842 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1843 if (ret)
1844 return ret;
1845 }
1846
1847 return 0;
1848}
1849
1850int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1851 struct ieee80211_vif *vif,
1852 struct ieee80211_sta *sta)
1853{
1854 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1855 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1856 u8 sta_id = mvm_sta->sta_id;
1857 int ret;
1858
1859 lockdep_assert_held(&mvm->mutex);
1860
1861 if (iwl_mvm_has_new_rx_api(mvm))
1862 kfree(mvm_sta->dup_data);
1863
1864 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1865 if (ret)
1866 return ret;
1867
1868
1869 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1870 if (ret)
1871 return ret;
1872 if (iwl_mvm_has_new_tx_api(mvm)) {
1873 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1874 } else {
1875 u32 q_mask = mvm_sta->tfd_queue_msk;
1876
1877 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1878 q_mask);
1879 }
1880 if (ret)
1881 return ret;
1882
1883 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1884
1885 iwl_mvm_disable_sta_queues(mvm, vif, sta);
1886
1887
1888 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1889 u8 reserved_txq = mvm_sta->reserved_queue;
1890 enum iwl_mvm_queue_status *status;
1891
1892
1893
1894
1895
1896
1897 status = &mvm->queue_info[reserved_txq].status;
1898 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1899 (*status != IWL_MVM_QUEUE_FREE),
1900 "sta_id %d reserved txq %d status %d",
1901 sta_id, reserved_txq, *status))
1902 return -EINVAL;
1903
1904 *status = IWL_MVM_QUEUE_FREE;
1905 }
1906
1907 if (vif->type == NL80211_IFTYPE_STATION &&
1908 mvmvif->ap_sta_id == sta_id) {
1909
1910 if (vif->bss_conf.assoc)
1911 return ret;
1912
1913
1914 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1915 }
1916
1917
1918
1919
1920
1921 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1922 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1923 cancel_delayed_work(&mvm->tdls_cs.dwork);
1924 }
1925
1926
1927
1928
1929
1930 spin_lock_bh(&mvm_sta->lock);
1931 spin_unlock_bh(&mvm_sta->lock);
1932
1933 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1934 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1935
1936 return ret;
1937}
1938
1939int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1940 struct ieee80211_vif *vif,
1941 u8 sta_id)
1942{
1943 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1944
1945 lockdep_assert_held(&mvm->mutex);
1946
1947 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1948 return ret;
1949}
1950
1951int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1952 struct iwl_mvm_int_sta *sta,
1953 u32 qmask, enum nl80211_iftype iftype,
1954 enum iwl_sta_type type)
1955{
1956 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1957 sta->sta_id == IWL_MVM_INVALID_STA) {
1958 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
1959 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
1960 return -ENOSPC;
1961 }
1962
1963 sta->tfd_queue_msk = qmask;
1964 sta->type = type;
1965
1966
1967 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1968 return 0;
1969}
1970
1971void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1972{
1973 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1974 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1975 sta->sta_id = IWL_MVM_INVALID_STA;
1976}
1977
1978static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
1979 u8 sta_id, u8 fifo)
1980{
1981 unsigned int wdg_timeout =
1982 mvm->trans->trans_cfg->base_params->wd_timeout;
1983 struct iwl_trans_txq_scd_cfg cfg = {
1984 .fifo = fifo,
1985 .sta_id = sta_id,
1986 .tid = IWL_MAX_TID_COUNT,
1987 .aggregate = false,
1988 .frame_limit = IWL_FRAME_LIMIT,
1989 };
1990
1991 WARN_ON(iwl_mvm_has_new_tx_api(mvm));
1992
1993 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
1994}
1995
1996static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
1997{
1998 unsigned int wdg_timeout =
1999 mvm->trans->trans_cfg->base_params->wd_timeout;
2000
2001 WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2002
2003 return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
2004 wdg_timeout);
2005}
2006
2007static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2008 int maccolor,
2009 struct iwl_mvm_int_sta *sta,
2010 u16 *queue, int fifo)
2011{
2012 int ret;
2013
2014
2015 if (!iwl_mvm_has_new_tx_api(mvm))
2016 iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2017
2018 ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
2019 if (ret) {
2020 if (!iwl_mvm_has_new_tx_api(mvm))
2021 iwl_mvm_disable_txq(mvm, NULL, *queue,
2022 IWL_MAX_TID_COUNT, 0);
2023 return ret;
2024 }
2025
2026
2027
2028
2029
2030 if (iwl_mvm_has_new_tx_api(mvm)) {
2031 int txq;
2032
2033 txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2034 if (txq < 0) {
2035 iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2036 return txq;
2037 }
2038
2039 *queue = txq;
2040 }
2041
2042 return 0;
2043}
2044
2045int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2046{
2047 int ret;
2048
2049 lockdep_assert_held(&mvm->mutex);
2050
2051
2052 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2053 NL80211_IFTYPE_UNSPECIFIED,
2054 IWL_STA_AUX_ACTIVITY);
2055 if (ret)
2056 return ret;
2057
2058 ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
2059 &mvm->aux_sta, &mvm->aux_queue,
2060 IWL_MVM_TX_FIFO_MCAST);
2061 if (ret) {
2062 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2063 return ret;
2064 }
2065
2066 return 0;
2067}
2068
2069int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2070{
2071 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2072
2073 lockdep_assert_held(&mvm->mutex);
2074
2075 return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2076 &mvm->snif_sta, &mvm->snif_queue,
2077 IWL_MVM_TX_FIFO_BE);
2078}
2079
2080int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2081{
2082 int ret;
2083
2084 lockdep_assert_held(&mvm->mutex);
2085
2086 iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
2087 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2088 if (ret)
2089 IWL_WARN(mvm, "Failed sending remove station\n");
2090
2091 return ret;
2092}
2093
2094int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2095{
2096 int ret;
2097
2098 lockdep_assert_held(&mvm->mutex);
2099
2100 iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
2101 ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2102 if (ret)
2103 IWL_WARN(mvm, "Failed sending remove station\n");
2104 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2105
2106 return ret;
2107}
2108
2109void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2110{
2111 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2112}
2113
2114
2115
2116
2117
2118
2119
2120
2121
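/*
 * Add the per-interface broadcast station to the firmware and, depending on
 * the device generation, hook it up to the probe-response or P2P device
 * queue.
 */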
2122int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2123{
2124 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2125 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2126 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2127 const u8 *baddr = _baddr;
2128 int queue;
2129 int ret;
2130 unsigned int wdg_timeout =
2131 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2132 struct iwl_trans_txq_scd_cfg cfg = {
2133 .fifo = IWL_MVM_TX_FIFO_VO,
2134 .sta_id = mvmvif->bcast_sta.sta_id,
2135 .tid = IWL_MAX_TID_COUNT,
2136 .aggregate = false,
2137 .frame_limit = IWL_FRAME_LIMIT,
2138 };
2139
2140 lockdep_assert_held(&mvm->mutex);
2141
2142 if (!iwl_mvm_has_new_tx_api(mvm)) {
2143 if (vif->type == NL80211_IFTYPE_AP ||
2144 vif->type == NL80211_IFTYPE_ADHOC) {
2145 queue = mvm->probe_queue;
2146 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2147 queue = mvm->p2p_dev_queue;
2148 } else {
2149 WARN(1, "Missing required TXQ for adding bcast STA\n");
2150 return -EINVAL;
2151 }
2152
2153 bsta->tfd_queue_msk |= BIT(queue);
2154
2155 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2156 }
2157
2158 if (vif->type == NL80211_IFTYPE_ADHOC)
2159 baddr = vif->bss_conf.bssid;
2160
2161 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2162 return -ENOSPC;
2163
2164 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2165 mvmvif->id, mvmvif->color);
2166 if (ret)
2167 return ret;
2168
2169
2170
2171
2172
2173 if (iwl_mvm_has_new_tx_api(mvm)) {
2174 queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2175 IWL_MAX_TID_COUNT,
2176 wdg_timeout);
2177 if (queue < 0) {
2178 iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2179 return queue;
2180 }
2181
2182 if (vif->type == NL80211_IFTYPE_AP ||
2183 vif->type == NL80211_IFTYPE_ADHOC)
2184 mvm->probe_queue = queue;
2185 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2186 mvm->p2p_dev_queue = queue;
2187 }
2188
2189 return 0;
2190}
2191
2192static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2193 struct ieee80211_vif *vif)
2194{
2195 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2196 int queue;
2197
2198 lockdep_assert_held(&mvm->mutex);
2199
2200 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2201
2202 switch (vif->type) {
2203 case NL80211_IFTYPE_AP:
2204 case NL80211_IFTYPE_ADHOC:
2205 queue = mvm->probe_queue;
2206 break;
2207 case NL80211_IFTYPE_P2P_DEVICE:
2208 queue = mvm->p2p_dev_queue;
2209 break;
2210 default:
2211 WARN(1, "Can't free bcast queue on vif type %d\n",
2212 vif->type);
2213 return;
2214 }
2215
2216 iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
2217 if (iwl_mvm_has_new_tx_api(mvm))
2218 return;
2219
2220 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2221 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2222}
2223
2224
2225
2226int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2227{
2228 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2229 int ret;
2230
2231 lockdep_assert_held(&mvm->mutex);
2232
2233 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2234
2235 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2236 if (ret)
2237 IWL_WARN(mvm, "Failed sending remove station\n");
2238 return ret;
2239}
2240
2241int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2242{
2243 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2244
2245 lockdep_assert_held(&mvm->mutex);
2246
2247 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2248 ieee80211_vif_type_p2p(vif),
2249 IWL_STA_GENERAL_PURPOSE);
2250}
2251
2252
2253
2254
2255
2256
2257
2258
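/*
 * Allocate the broadcast station for this interface and send it to the
 * firmware; on failure the driver-side allocation is rolled back.
 */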
2259int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2260{
2261 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2262 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2263 int ret;
2264
2265 lockdep_assert_held(&mvm->mutex);
2266
2267 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2268 if (ret)
2269 return ret;
2270
2271 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2272
2273 if (ret)
2274 iwl_mvm_dealloc_int_sta(mvm, bsta);
2275
2276 return ret;
2277}
2278
2279void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2280{
2281 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2282
2283 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2284}
2285
2286
2287
2288
2289
2290int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2291{
2292 int ret;
2293
2294 lockdep_assert_held(&mvm->mutex);
2295
2296 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2297
2298 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2299
2300 return ret;
2301}
2302
2303
2304
2305
2306
2307
2308
2309
2310
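/*
 * Add the multicast/CAB station used by AP and ADHOC interfaces: set up its
 * queue (the cab_queue) and send the station entry to the firmware.
 */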
2311int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2312{
2313 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2314 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2315 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2316 const u8 *maddr = _maddr;
2317 struct iwl_trans_txq_scd_cfg cfg = {
2318 .fifo = vif->type == NL80211_IFTYPE_AP ?
2319 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2320 .sta_id = msta->sta_id,
2321 .tid = 0,
2322 .aggregate = false,
2323 .frame_limit = IWL_FRAME_LIMIT,
2324 };
2325 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2326 int ret;
2327
2328 lockdep_assert_held(&mvm->mutex);
2329
2330 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2331 vif->type != NL80211_IFTYPE_ADHOC))
2332 return -ENOTSUPP;
2333
2334
2335
2336
2337
2338
2339
2340 if (vif->type == NL80211_IFTYPE_ADHOC)
2341 mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2342
 /*
 * With STA_TYPE-aware firmware on the old TX API, the cab queue is part
 * of the station's TFD queue mask, so enable it before sending ADD_STA.
 */
2347 if (!iwl_mvm_has_new_tx_api(mvm) &&
2348 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2349 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2350 timeout);
2351 msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2352 }
2353 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2354 mvmvif->id, mvmvif->color);
2355 if (ret)
2356 goto err;
2357
 /*
 * For the new TX API (or firmware without STA_TYPE support) the cab
 * queue can only be enabled after the ADD_STA command, once the
 * firmware knows about the station.
 */
2365 if (iwl_mvm_has_new_tx_api(mvm)) {
2366 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2367 0,
2368 timeout);
2369 if (queue < 0) {
2370 ret = queue;
2371 goto err;
2372 }
2373 mvmvif->cab_queue = queue;
2374 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2375 IWL_UCODE_TLV_API_STA_TYPE))
2376 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2377 timeout);
2378
2379 return 0;
2380err:
2381 iwl_mvm_dealloc_int_sta(mvm, msta);
2382 return ret;
2383}
2384
2385static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2386 struct ieee80211_key_conf *keyconf,
2387 bool mcast)
2388{
2389 union {
2390 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2391 struct iwl_mvm_add_sta_key_cmd cmd;
2392 } u = {};
2393 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2394 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2395 __le16 key_flags;
2396 int ret, size;
2397 u32 status;
2398
 /* no station to remove - this is valid e.g. for GTK removal */
2400 if (sta_id == IWL_MVM_INVALID_STA)
2401 return 0;
2402
2403 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2404 STA_KEY_FLG_KEYID_MSK);
2405 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2406 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2407
2408 if (mcast)
2409 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2410
 /*
 * The common fields sit at the same offset in both command versions,
 * so they can be filled through u.cmd.common for either layout.
 */
2415 u.cmd.common.key_flags = key_flags;
2416 u.cmd.common.key_offset = keyconf->hw_key_idx;
2417 u.cmd.common.sta_id = sta_id;
2418
2419 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2420
2421 status = ADD_STA_SUCCESS;
2422 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2423 &status);
2424
2425 switch (status) {
2426 case ADD_STA_SUCCESS:
2427 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2428 break;
2429 default:
2430 ret = -EIO;
2431 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2432 break;
2433 }
2434
2435 return ret;
2436}
2437
/*
 * Flush and disable the cab queue, then ask the FW to remove the
 * multicast station.
 */
2442int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2443{
2444 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2445 int ret;
2446
2447 lockdep_assert_held(&mvm->mutex);
2448
2449 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2450
2451 iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
2452
2453 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2454 if (ret)
2455 IWL_WARN(mvm, "Failed sending remove station\n");
2456
2457 return ret;
2458}
2459
2460#define IWL_MAX_RX_BA_SESSIONS 16
2461
2462static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2463{
2464 struct iwl_mvm_rss_sync_notif notif = {
2465 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2466 .metadata.sync = 1,
2467 .delba.baid = baid,
2468 };
 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}
2471
2472static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2473 struct iwl_mvm_baid_data *data)
2474{
2475 int i;
2476
2477 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2478
2479 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2480 int j;
2481 struct iwl_mvm_reorder_buffer *reorder_buf =
2482 &data->reorder_buf[i];
2483 struct iwl_mvm_reorder_buf_entry *entries =
2484 &data->entries[i * data->entries_per_queue];
2485
2486 spin_lock_bh(&reorder_buf->lock);
2487 if (likely(!reorder_buf->num_stored)) {
2488 spin_unlock_bh(&reorder_buf->lock);
2489 continue;
2490 }
2491
 /*
 * This shouldn't happen in regular DELBA: the internal delBA
 * notification should have released all frames pending in the
 * reorder buffer.
 */
2497 WARN_ON(1);
2498
2499 for (j = 0; j < reorder_buf->buf_size; j++)
2500 __skb_queue_purge(&entries[j].e.frames);
2501
 /*
 * Mark the buffer as removed before dropping the lock so that late
 * frames on this queue can't re-arm the reorder timer while it is
 * being deleted below.
 */
2509 reorder_buf->removed = true;
2510 spin_unlock_bh(&reorder_buf->lock);
2511 del_timer_sync(&reorder_buf->reorder_timer);
2512 }
2513}
2514
2515static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2516 struct iwl_mvm_baid_data *data,
2517 u16 ssn, u16 buf_size)
2518{
2519 int i;
2520
2521 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2522 struct iwl_mvm_reorder_buffer *reorder_buf =
2523 &data->reorder_buf[i];
2524 struct iwl_mvm_reorder_buf_entry *entries =
2525 &data->entries[i * data->entries_per_queue];
2526 int j;
2527
2528 reorder_buf->num_stored = 0;
2529 reorder_buf->head_sn = ssn;
2530 reorder_buf->buf_size = buf_size;
2531
2532 timer_setup(&reorder_buf->reorder_timer,
2533 iwl_mvm_reorder_timer_expired, 0);
2534 spin_lock_init(&reorder_buf->lock);
2535 reorder_buf->mvm = mvm;
2536 reorder_buf->queue = i;
2537 reorder_buf->valid = false;
2538 for (j = 0; j < reorder_buf->buf_size; j++)
2539 __skb_queue_head_init(&entries[j].e.frames);
2540 }
2541}
2542
2543int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2544 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2545{
2546 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2547 struct iwl_mvm_add_sta_cmd cmd = {};
2548 struct iwl_mvm_baid_data *baid_data = NULL;
2549 int ret;
2550 u32 status;
2551
2552 lockdep_assert_held(&mvm->mutex);
2553
2554 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2555 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2556 return -ENOSPC;
2557 }
2558
2559 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2560 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2561
2562
2563#ifndef __CHECKER__
2564
 /*
 * The ALIGN() below is only exact if the cache line size divides the
 * entry size or vice versa - assert that at build time.
 */
2570 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2571 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2572#endif
2573
 /*
 * Round each queue's reorder buffer up to a full cache line so that
 * the per-queue entries don't share cache lines.
 */
2579 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2580
 /*
 * Allocate here so that if the allocation fails we can bail out
 * before asking the firmware to start the BA session.
 */
2585 baid_data = kzalloc(sizeof(*baid_data) +
2586 mvm->trans->num_rx_queues *
2587 reorder_buf_size,
2588 GFP_KERNEL);
2589 if (!baid_data)
2590 return -ENOMEM;
2591
 /* this division is exact thanks to the BUILD_BUG_ON() above */
2596 baid_data->entries_per_queue =
2597 reorder_buf_size / sizeof(baid_data->entries[0]);
2598 }
2599
2600 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2601 cmd.sta_id = mvm_sta->sta_id;
2602 cmd.add_modify = STA_MODE_MODIFY;
2603 if (start) {
2604 cmd.add_immediate_ba_tid = (u8) tid;
2605 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2606 cmd.rx_ba_window = cpu_to_le16(buf_size);
2607 } else {
2608 cmd.remove_immediate_ba_tid = (u8) tid;
2609 }
2610 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2611 STA_MODIFY_REMOVE_BA_TID;
2612
2613 status = ADD_STA_SUCCESS;
2614 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2615 iwl_mvm_add_sta_cmd_size(mvm),
2616 &cmd, &status);
2617 if (ret)
2618 goto out_free;
2619
2620 switch (status & IWL_ADD_STA_STATUS_MASK) {
2621 case ADD_STA_SUCCESS:
2622 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2623 start ? "start" : "stopp");
2624 break;
2625 case ADD_STA_IMMEDIATE_BA_FAILURE:
2626 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2627 ret = -ENOSPC;
2628 break;
2629 default:
2630 ret = -EIO;
2631 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2632 start ? "start" : "stopp", status);
2633 break;
2634 }
2635
2636 if (ret)
2637 goto out_free;
2638
2639 if (start) {
2640 u8 baid;
2641
2642 mvm->rx_ba_sessions++;
2643
2644 if (!iwl_mvm_has_new_rx_api(mvm))
2645 return 0;
2646
2647 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2648 ret = -EINVAL;
2649 goto out_free;
2650 }
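 /* the firmware returns the BAID it allocated in the command status */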
2651 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2652 IWL_ADD_STA_BAID_SHIFT);
2653 baid_data->baid = baid;
2654 baid_data->timeout = timeout;
2655 baid_data->last_rx = jiffies;
2656 baid_data->rcu_ptr = &mvm->baid_map[baid];
2657 timer_setup(&baid_data->session_timer,
2658 iwl_mvm_rx_agg_session_expired, 0);
2659 baid_data->mvm = mvm;
2660 baid_data->tid = tid;
2661 baid_data->sta_id = mvm_sta->sta_id;
2662
2663 mvm_sta->tid_to_baid[tid] = baid;
2664 if (timeout)
2665 mod_timer(&baid_data->session_timer,
2666 TU_TO_EXP_TIME(timeout * 2));
2667
2668 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2669
 /*
 * Publish the BA data under RCU so that RX processing running in
 * parallel only ever sees a fully initialized structure.
 */
2675 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2676 mvm_sta->sta_id, tid, baid);
2677 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2678 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2679 } else {
2680 u8 baid = mvm_sta->tid_to_baid[tid];
2681
2682 if (mvm->rx_ba_sessions > 0)
2683
2684 mvm->rx_ba_sessions--;
2685 if (!iwl_mvm_has_new_rx_api(mvm))
2686 return 0;
2687
2688 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2689 return -EINVAL;
2690
2691 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2692 if (WARN_ON(!baid_data))
2693 return -EINVAL;
2694
2695
2696 iwl_mvm_free_reorder(mvm, baid_data);
2697 del_timer_sync(&baid_data->session_timer);
2698 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2699 kfree_rcu(baid_data, rcu_head);
2700 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2701 }
2702 return 0;
2703
2704out_free:
2705 kfree(baid_data);
2706 return ret;
2707}
2708
2709int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2710 int tid, u8 queue, bool start)
2711{
2712 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2713 struct iwl_mvm_add_sta_cmd cmd = {};
2714 int ret;
2715 u32 status;
2716
2717 lockdep_assert_held(&mvm->mutex);
2718
2719 if (start) {
2720 mvm_sta->tfd_queue_msk |= BIT(queue);
2721 mvm_sta->tid_disable_agg &= ~BIT(tid);
2722 } else {
 /* in DQA mode the queue isn't removed when aggregation is stopped */
2724 mvm_sta->tid_disable_agg |= BIT(tid);
2725 }
2726
2727 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2728 cmd.sta_id = mvm_sta->sta_id;
2729 cmd.add_modify = STA_MODE_MODIFY;
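 /* queue configuration is only modified via ADD_STA on the old TX API */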
2730 if (!iwl_mvm_has_new_tx_api(mvm))
2731 cmd.modify_mask = STA_MODIFY_QUEUES;
2732 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2733 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2734 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2735
2736 status = ADD_STA_SUCCESS;
2737 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2738 iwl_mvm_add_sta_cmd_size(mvm),
2739 &cmd, &status);
2740 if (ret)
2741 return ret;
2742
2743 switch (status & IWL_ADD_STA_STATUS_MASK) {
2744 case ADD_STA_SUCCESS:
2745 break;
2746 default:
2747 ret = -EIO;
2748 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2749 start ? "start" : "stopp", status);
2750 break;
2751 }
2752
2753 return ret;
2754}
2755
2756const u8 tid_to_mac80211_ac[] = {
2757 IEEE80211_AC_BE,
2758 IEEE80211_AC_BK,
2759 IEEE80211_AC_BK,
2760 IEEE80211_AC_BE,
2761 IEEE80211_AC_VI,
2762 IEEE80211_AC_VI,
2763 IEEE80211_AC_VO,
2764 IEEE80211_AC_VO,
2765 IEEE80211_AC_VO,
2766};
2767
2768static const u8 tid_to_ucode_ac[] = {
2769 AC_BE,
2770 AC_BK,
2771 AC_BK,
2772 AC_BE,
2773 AC_VI,
2774 AC_VI,
2775 AC_VO,
2776 AC_VO,
2777};
2778
2779int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2780 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2781{
2782 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2783 struct iwl_mvm_tid_data *tid_data;
2784 u16 normalized_ssn;
2785 u16 txq_id;
2786 int ret;
2787
2788 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2789 return -EINVAL;
2790
2791 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2792 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2793 IWL_ERR(mvm,
2794 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2795 mvmsta->tid_data[tid].state);
2796 return -ENXIO;
2797 }
2798
2799 lockdep_assert_held(&mvm->mutex);
2800
2801 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2802 iwl_mvm_has_new_tx_api(mvm)) {
2803 u8 ac = tid_to_mac80211_ac[tid];
2804
2805 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2806 if (ret)
2807 return ret;
2808 }
2809
2810 spin_lock_bh(&mvmsta->lock);
2811
 /*
 * Two possible cases here:
 *  1. the TID already has a TXQ - it only needs to be marked as
 *     aggregated
 *  2. no TXQ was allocated yet - find a free one and reserve it
 */
2818 txq_id = mvmsta->tid_data[tid].txq_id;
2819 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2820 ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2821 IWL_MVM_DQA_MIN_DATA_QUEUE,
2822 IWL_MVM_DQA_MAX_DATA_QUEUE);
2823 if (ret < 0) {
2824 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2825 goto out;
2826 }
2827
2828 txq_id = ret;
2829
2830
2831 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2832 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2833 ret = -ENXIO;
 IWL_ERR(mvm, "tid %d: txq_id %d out of range (0, %d)!\n",
 tid, txq_id, IWL_MAX_HW_QUEUES - 1);
2836 goto out;
2837
2838 } else if (unlikely(mvm->queue_info[txq_id].status ==
2839 IWL_MVM_QUEUE_SHARED)) {
2840 ret = -ENXIO;
2841 IWL_DEBUG_TX_QUEUES(mvm,
2842 "Can't start tid %d agg on shared queue!\n",
2843 tid);
2844 goto out;
2845 }
2846
2847 IWL_DEBUG_TX_QUEUES(mvm,
2848 "AGG for tid %d will be on queue #%d\n",
2849 tid, txq_id);
2850
2851 tid_data = &mvmsta->tid_data[tid];
2852 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2853 tid_data->txq_id = txq_id;
2854 *ssn = tid_data->ssn;
2855
2856 IWL_DEBUG_TX_QUEUES(mvm,
2857 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2858 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2859 tid_data->next_reclaimed);
2860
 /*
 * On gen2 hardware the next_reclaimed index is only 8 bits wide, so
 * truncate the SSN the same way before comparing the two.
 */
2865 normalized_ssn = tid_data->ssn;
2866 if (mvm->trans->trans_cfg->gen2)
2867 normalized_ssn &= 0xff;
2868
2869 if (normalized_ssn == tid_data->next_reclaimed) {
2870 tid_data->state = IWL_AGG_STARTING;
2871 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
2872 } else {
2873 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2874 ret = 0;
2875 }
2876
2877out:
2878 spin_unlock_bh(&mvmsta->lock);
2879
2880 return ret;
2881}
2882
2883int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2884 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
2885 bool amsdu)
2886{
2887 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2888 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2889 unsigned int wdg_timeout =
2890 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2891 int queue, ret;
2892 bool alloc_queue = true;
2893 enum iwl_mvm_queue_status queue_status;
2894 u16 ssn;
2895
2896 struct iwl_trans_txq_scd_cfg cfg = {
2897 .sta_id = mvmsta->sta_id,
2898 .tid = tid,
2899 .frame_limit = buf_size,
2900 .aggregate = true,
2901 };
2902
 /*
 * With TLC offload the firmware handles the rate-scaling side of
 * aggregation itself, so this path is not expected to run.
 */
2907 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
2908 return -EINVAL;
2909
2910 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2911 != IWL_MAX_TID_COUNT);
2912
2913 spin_lock_bh(&mvmsta->lock);
2914 ssn = tid_data->ssn;
2915 queue = tid_data->txq_id;
2916 tid_data->state = IWL_AGG_ON;
2917 mvmsta->agg_tids |= BIT(tid);
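 /* the SSN has been captured into the local variable above; invalidate it here */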
2918 tid_data->ssn = 0xffff;
2919 tid_data->amsdu_in_ampdu_allowed = amsdu;
2920 spin_unlock_bh(&mvmsta->lock);
2921
2922 if (iwl_mvm_has_new_tx_api(mvm)) {
 /*
 * If there were no queue for this TID, iwl_mvm_sta_tx_agg_start()
 * would have failed, so a queue exists here. The new TX API cannot
 * reconfigure the scheduler for a smaller aggregation window, so
 * only the default queue size is supported.
 */
2934 if (buf_size < IWL_FRAME_LIMIT)
2935 return -ENOTSUPP;
2936
2937 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2938 if (ret)
2939 return -EIO;
2940 goto out;
2941 }
2942
2943 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2944
2945 queue_status = mvm->queue_info[queue].status;
2946
2947
2948 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2949 alloc_queue = false;
2950
 /*
 * Only reconfigure the SCD when reusing an existing queue and the
 * negotiated aggregation window is smaller than the frame limit.
 */
2955 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
 /*
 * A queue that is about to be reconfigured must first be drained of
 * pending frames.
 */
2960 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2961 BIT(queue));
2962 if (ret) {
2963 IWL_ERR(mvm,
2964 "Error draining queue before reconfig\n");
2965 return ret;
2966 }
2967
2968 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2969 mvmsta->sta_id, tid,
2970 buf_size, ssn);
2971 if (ret) {
2972 IWL_ERR(mvm,
2973 "Error reconfiguring TXQ #%d\n", queue);
2974 return ret;
2975 }
2976 }
2977
2978 if (alloc_queue)
2979 iwl_mvm_enable_txq(mvm, sta, queue, ssn,
2980 &cfg, wdg_timeout);
2981
 /* send ADD_STA to enable aggregation only if the queue isn't shared */
2983 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2984 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2985 if (ret)
2986 return -EIO;
2987 }
2988
2989
2990 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2991
2992out:
2993
 /*
 * Cap the rate-scaling aggregation frame limit to the negotiated BA
 * window size and push the updated LQ configuration to the firmware.
 */
3000 mvmsta->max_agg_bufsize =
3001 min(mvmsta->max_agg_bufsize, buf_size);
3002 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
3003
3004 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3005 sta->addr, tid);
3006
3007 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
3008}
3009
3010static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3011 struct iwl_mvm_sta *mvmsta,
3012 struct iwl_mvm_tid_data *tid_data)
3013{
3014 u16 txq_id = tid_data->txq_id;
3015
3016 lockdep_assert_held(&mvm->mutex);
3017
3018 if (iwl_mvm_has_new_tx_api(mvm))
3019 return;
3020
 /*
 * A TXQ is only marked RESERVED if no traffic ever went through it, so
 * it was never handed to the firmware. In that case simply return it
 * to the free pool and forget it on this TID.
 */
3028 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3029 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3030 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3031 }
3032}
3033
3034int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3035 struct ieee80211_sta *sta, u16 tid)
3036{
3037 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3038 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3039 u16 txq_id;
3040 int err;
3041
 /*
 * If mac80211 is cleaning its state (HW restart), report success
 * right away - our state has been cleared anyway.
 */
3046 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3047 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3048 return 0;
3049 }
3050
3051 spin_lock_bh(&mvmsta->lock);
3052
3053 txq_id = tid_data->txq_id;
3054
3055 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3056 mvmsta->sta_id, tid, txq_id, tid_data->state);
3057
3058 mvmsta->agg_tids &= ~BIT(tid);
3059
3060 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3061
3062 switch (tid_data->state) {
3063 case IWL_AGG_ON:
3064 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3065
3066 IWL_DEBUG_TX_QUEUES(mvm,
3067 "ssn = %d, next_recl = %d\n",
3068 tid_data->ssn, tid_data->next_reclaimed);
3069
3070 tid_data->ssn = 0xffff;
3071 tid_data->state = IWL_AGG_OFF;
3072 spin_unlock_bh(&mvmsta->lock);
3073
3074 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3075
3076 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3077 return 0;
3078 case IWL_AGG_STARTING:
3079 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3080
 /*
 * The aggregation session was stopped before it was fully set up,
 * e.g. because the AddBA timer timed out. No barriers are needed
 * here since we hold the mvm mutex.
 */
3086 lockdep_assert_held(&mvm->mutex);
3087
3088 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3089 tid_data->state = IWL_AGG_OFF;
3090 err = 0;
3091 break;
3092 default:
3093 IWL_ERR(mvm,
3094 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3095 mvmsta->sta_id, tid, tid_data->state);
3096 IWL_ERR(mvm,
3097 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3098 err = -EINVAL;
3099 }
3100
3101 spin_unlock_bh(&mvmsta->lock);
3102
3103 return err;
3104}
3105
3106int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3107 struct ieee80211_sta *sta, u16 tid)
3108{
3109 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3110 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3111 u16 txq_id;
3112 enum iwl_mvm_agg_state old_state;
3113
 /*
 * Set the agg state to OFF first so that the TX status path doesn't
 * call ieee80211_stop_tx_ba_cb for this session in the meantime.
 */
3118 spin_lock_bh(&mvmsta->lock);
3119 txq_id = tid_data->txq_id;
3120 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3121 mvmsta->sta_id, tid, txq_id, tid_data->state);
3122 old_state = tid_data->state;
3123 tid_data->state = IWL_AGG_OFF;
3124 mvmsta->agg_tids &= ~BIT(tid);
3125 spin_unlock_bh(&mvmsta->lock);
3126
3127 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3128
3129 if (old_state >= IWL_AGG_ON) {
3130 iwl_mvm_drain_sta(mvm, mvmsta, true);
3131
3132 if (iwl_mvm_has_new_tx_api(mvm)) {
3133 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3134 BIT(tid), 0))
3135 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3136 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3137 } else {
3138 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
3139 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3140 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3141 }
3142
3143 iwl_mvm_drain_sta(mvm, mvmsta, false);
3144
3145 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3146 }
3147
3148 return 0;
3149}
3150
3151static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3152{
3153 int i, max = -1, max_offs = -1;
3154
3155 lockdep_assert_held(&mvm->mutex);
3156
 /*
 * Pick the unused key offset with the highest 'deleted' counter:
 * every deletion increments all counters and resets the freed slot
 * to zero, so the highest counter is the slot freed longest ago.
 */
3163 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3164 if (test_bit(i, mvm->fw_key_table))
3165 continue;
3166 if (mvm->fw_key_deleted[i] > max) {
3167 max = mvm->fw_key_deleted[i];
3168 max_offs = i;
3169 }
3170 }
3171
3172 if (max_offs < 0)
3173 return STA_KEY_IDX_INVALID;
3174
3175 return max_offs;
3176}
3177
3178static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3179 struct ieee80211_vif *vif,
3180 struct ieee80211_sta *sta)
3181{
3182 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3183
3184 if (sta)
3185 return iwl_mvm_sta_from_mac80211(sta);
3186
 /*
 * The device expects GTKs for station interfaces to be installed as
 * GTKs for the AP station. If no station was given, use the AP's
 * station ID.
 */
3192 if (vif->type == NL80211_IFTYPE_STATION &&
3193 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3194 u8 sta_id = mvmvif->ap_sta_id;
3195
3196 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3197 lockdep_is_held(&mvm->mutex));
3198
 /*
 * The station may currently be getting removed, in which case the
 * RCU pointer holds an error value - treat that as no station.
 */
3204 if (IS_ERR_OR_NULL(sta))
3205 return NULL;
3206
3207 return iwl_mvm_sta_from_mac80211(sta);
3208 }
3209
3210 return NULL;
3211}
3212
3213static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3214 u32 sta_id,
3215 struct ieee80211_key_conf *key, bool mcast,
3216 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3217 u8 key_offset, bool mfp)
3218{
3219 union {
3220 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3221 struct iwl_mvm_add_sta_key_cmd cmd;
3222 } u = {};
3223 __le16 key_flags;
3224 int ret;
3225 u32 status;
3226 u16 keyidx;
3227 u64 pn = 0;
3228 int i, size;
3229 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3230 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3231
3232 if (sta_id == IWL_MVM_INVALID_STA)
3233 return -EINVAL;
3234
3235 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3236 STA_KEY_FLG_KEYID_MSK;
3237 key_flags = cpu_to_le16(keyidx);
3238 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3239
3240 switch (key->cipher) {
3241 case WLAN_CIPHER_SUITE_TKIP:
3242 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3243 if (new_api) {
3244 memcpy((void *)&u.cmd.tx_mic_key,
3245 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3246 IWL_MIC_KEY_SIZE);
3247
3248 memcpy((void *)&u.cmd.rx_mic_key,
3249 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3250 IWL_MIC_KEY_SIZE);
3251 pn = atomic64_read(&key->tx_pn);
3252
3253 } else {
3254 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3255 for (i = 0; i < 5; i++)
3256 u.cmd_v1.tkip_rx_ttak[i] =
3257 cpu_to_le16(tkip_p1k[i]);
3258 }
3259 memcpy(u.cmd.common.key, key->key, key->keylen);
3260 break;
3261 case WLAN_CIPHER_SUITE_CCMP:
3262 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3263 memcpy(u.cmd.common.key, key->key, key->keylen);
3264 if (new_api)
3265 pn = atomic64_read(&key->tx_pn);
3266 break;
3267 case WLAN_CIPHER_SUITE_WEP104:
3268 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
 /* fall through */
3270 case WLAN_CIPHER_SUITE_WEP40:
3271 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
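 /* the WEP key material is copied at a 3-byte offset into the key field */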
3272 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3273 break;
3274 case WLAN_CIPHER_SUITE_GCMP_256:
3275 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
 /* fall through */
3277 case WLAN_CIPHER_SUITE_GCMP:
3278 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3279 memcpy(u.cmd.common.key, key->key, key->keylen);
3280 if (new_api)
3281 pn = atomic64_read(&key->tx_pn);
3282 break;
3283 default:
3284 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3285 memcpy(u.cmd.common.key, key->key, key->keylen);
3286 }
3287
3288 if (mcast)
3289 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3290 if (mfp)
3291 key_flags |= cpu_to_le16(STA_KEY_MFP);
3292
3293 u.cmd.common.key_offset = key_offset;
3294 u.cmd.common.key_flags = key_flags;
3295 u.cmd.common.sta_id = sta_id;
3296
3297 if (new_api) {
3298 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3299 size = sizeof(u.cmd);
3300 } else {
3301 size = sizeof(u.cmd_v1);
3302 }
3303
3304 status = ADD_STA_SUCCESS;
3305 if (cmd_flags & CMD_ASYNC)
3306 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3307 &u.cmd);
3308 else
3309 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3310 &u.cmd, &status);
3311
3312 switch (status) {
3313 case ADD_STA_SUCCESS:
3314 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3315 break;
3316 default:
3317 ret = -EIO;
3318 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3319 break;
3320 }
3321
3322 return ret;
3323}
3324
3325static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3326 struct ieee80211_key_conf *keyconf,
3327 u8 sta_id, bool remove_key)
3328{
3329 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3330
 /* verify the key details match the required command's expectations */
3332 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3333 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3334 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3335 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3336 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3337 return -EINVAL;
3338
3339 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3340 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3341 return -EINVAL;
3342
3343 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3344 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3345
3346 if (remove_key) {
 /* this is a valid situation for IGTK removal */
3348 if (sta_id == IWL_MVM_INVALID_STA)
3349 return 0;
3350
3351 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3352 } else {
3353 struct ieee80211_key_seq seq;
3354 const u8 *pn;
3355
3356 switch (keyconf->cipher) {
3357 case WLAN_CIPHER_SUITE_AES_CMAC:
3358 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3359 break;
3360 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3361 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3362 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3363 break;
3364 default:
3365 return -EINVAL;
3366 }
3367
3368 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3369 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3370 igtk_cmd.ctrl_flags |=
3371 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3372 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3373 pn = seq.aes_cmac.pn;
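 /* build the 48-bit receive PN; pn[0] is the most significant byte */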
3374 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3375 ((u64) pn[4] << 8) |
3376 ((u64) pn[3] << 16) |
3377 ((u64) pn[2] << 24) |
3378 ((u64) pn[1] << 32) |
3379 ((u64) pn[0] << 40));
3380 }
3381
3382 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3383 remove_key ? "removing" : "installing",
3384 igtk_cmd.sta_id);
3385
3386 if (!iwl_mvm_has_new_rx_api(mvm)) {
3387 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3388 .ctrl_flags = igtk_cmd.ctrl_flags,
3389 .key_id = igtk_cmd.key_id,
3390 .sta_id = igtk_cmd.sta_id,
3391 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3392 };
3393
3394 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3395 ARRAY_SIZE(igtk_cmd_v1.igtk));
3396 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3397 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3398 }
3399 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3400 sizeof(igtk_cmd), &igtk_cmd);
3401}
3402
3403
3404static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3405 struct ieee80211_vif *vif,
3406 struct ieee80211_sta *sta)
3407{
3408 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3409
3410 if (sta)
3411 return sta->addr;
3412
3413 if (vif->type == NL80211_IFTYPE_STATION &&
3414 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3415 u8 sta_id = mvmvif->ap_sta_id;
3416 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3417 lockdep_is_held(&mvm->mutex));
3418 return sta->addr;
3419 }
3420
3421
3422 return NULL;
3423}
3424
3425static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3426 struct ieee80211_vif *vif,
3427 struct ieee80211_sta *sta,
3428 struct ieee80211_key_conf *keyconf,
3429 u8 key_offset,
3430 bool mcast)
3431{
3432 int ret;
3433 const u8 *addr;
3434 struct ieee80211_key_seq seq;
3435 u16 p1k[5];
3436 u32 sta_id;
3437 bool mfp = false;
3438
3439 if (sta) {
3440 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3441
3442 sta_id = mvm_sta->sta_id;
3443 mfp = sta->mfp;
3444 } else if (vif->type == NL80211_IFTYPE_AP &&
3445 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3446 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3447
3448 sta_id = mvmvif->mcast_sta.sta_id;
3449 } else {
3450 IWL_ERR(mvm, "Failed to find station id\n");
3451 return -EINVAL;
3452 }
3453
3454 switch (keyconf->cipher) {
3455 case WLAN_CIPHER_SUITE_TKIP:
3456 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3457
3458 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3459 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3460 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3461 seq.tkip.iv32, p1k, 0, key_offset,
3462 mfp);
3463 break;
3464 case WLAN_CIPHER_SUITE_CCMP:
3465 case WLAN_CIPHER_SUITE_WEP40:
3466 case WLAN_CIPHER_SUITE_WEP104:
3467 case WLAN_CIPHER_SUITE_GCMP:
3468 case WLAN_CIPHER_SUITE_GCMP_256:
3469 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3470 0, NULL, 0, key_offset, mfp);
3471 break;
3472 default:
3473 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3474 0, NULL, 0, key_offset, mfp);
3475 }
3476
3477 return ret;
3478}
3479
3480int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3481 struct ieee80211_vif *vif,
3482 struct ieee80211_sta *sta,
3483 struct ieee80211_key_conf *keyconf,
3484 u8 key_offset)
3485{
3486 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3487 struct iwl_mvm_sta *mvm_sta;
3488 u8 sta_id = IWL_MVM_INVALID_STA;
3489 int ret;
3490 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3491
3492 lockdep_assert_held(&mvm->mutex);
3493
3494 if (vif->type != NL80211_IFTYPE_AP ||
3495 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3496
3497 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3498 if (!mvm_sta) {
3499 IWL_ERR(mvm, "Failed to find station\n");
3500 return -EINVAL;
3501 }
3502 sta_id = mvm_sta->sta_id;
3503
 /*
 * The 'sta' parameter may be NULL (e.g. when removing a GTK), in
 * which case the station has to be fetched from the local table.
 */
3509 if (!sta) {
3510 sta = rcu_dereference_protected(
3511 mvm->fw_id_to_mac_id[sta_id],
3512 lockdep_is_held(&mvm->mutex));
3513 if (IS_ERR_OR_NULL(sta)) {
3514 IWL_ERR(mvm, "Invalid station id\n");
3515 return -EINVAL;
3516 }
3517 }
3518
3519 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3520 return -EINVAL;
3521 } else {
3522 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3523
3524 sta_id = mvmvif->mcast_sta.sta_id;
3525 }
3526
3527 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3528 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3529 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3530 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3531 goto end;
3532 }
3533
 /*
 * If no key offset was pre-assigned, pick a free slot now and record
 * it in keyconf->hw_key_idx. Callers that pass a specific offset
 * (e.g. when re-programming keys) keep their hw_key_idx untouched so
 * the original keys can be restored later.
 */
3545 if (key_offset == STA_KEY_IDX_INVALID) {
3546 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3547 if (key_offset == STA_KEY_IDX_INVALID)
3548 return -ENOSPC;
3549 keyconf->hw_key_idx = key_offset;
3550 }
3551
3552 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3553 if (ret)
3554 goto end;
3555
 /*
 * For WEP the same key is used for multicast and unicast; upload it
 * again with the same key offset, now mapped to the other (mcast or
 * ucast) entry of the station.
 */
3562 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3563 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3564 sta) {
3565 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3566 key_offset, !mcast);
3567 if (ret) {
3568 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3569 goto end;
3570 }
3571 }
3572
3573 __set_bit(key_offset, mvm->fw_key_table);
3574
3575end:
3576 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3577 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3578 sta ? sta->addr : zero_addr, ret);
3579 return ret;
3580}
3581
3582int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3583 struct ieee80211_vif *vif,
3584 struct ieee80211_sta *sta,
3585 struct ieee80211_key_conf *keyconf)
3586{
3587 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3588 struct iwl_mvm_sta *mvm_sta;
3589 u8 sta_id = IWL_MVM_INVALID_STA;
3590 int ret, i;
3591
3592 lockdep_assert_held(&mvm->mutex);
3593
3594
3595 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3596 if (mvm_sta)
3597 sta_id = mvm_sta->sta_id;
3598 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3599 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3600
3601
3602 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3603 keyconf->keyidx, sta_id);
3604
3605 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3606 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3607 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3608 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3609
3610 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3611 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3612 keyconf->hw_key_idx);
3613 return -ENOENT;
3614 }
3615
3616
3617 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3618 if (mvm->fw_key_deleted[i] < U8_MAX)
3619 mvm->fw_key_deleted[i]++;
3620 }
3621 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3622
3623 if (sta && !mvm_sta) {
3624 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3625 return 0;
3626 }
3627
3628 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3629 if (ret)
3630 return ret;
3631
3632
3633 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3634 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3635 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3636
3637 return ret;
3638}
3639
3640void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3641 struct ieee80211_vif *vif,
3642 struct ieee80211_key_conf *keyconf,
3643 struct ieee80211_sta *sta, u32 iv32,
3644 u16 *phase1key)
3645{
3646 struct iwl_mvm_sta *mvm_sta;
3647 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3648 bool mfp = sta ? sta->mfp : false;
3649
3650 rcu_read_lock();
3651
3652 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3653 if (WARN_ON_ONCE(!mvm_sta))
3654 goto unlock;
3655 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3656 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3657 mfp);
3658
3659 unlock:
3660 rcu_read_unlock();
3661}
3662
3663void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3664 struct ieee80211_sta *sta)
3665{
3666 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3667 struct iwl_mvm_add_sta_cmd cmd = {
3668 .add_modify = STA_MODE_MODIFY,
3669 .sta_id = mvmsta->sta_id,
3670 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3671 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3672 };
3673 int ret;
3674
3675 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3676 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3677 if (ret)
3678 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3679}
3680
3681void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3682 struct ieee80211_sta *sta,
3683 enum ieee80211_frame_release_type reason,
3684 u16 cnt, u16 tids, bool more_data,
3685 bool single_sta_queue)
3686{
3687 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3688 struct iwl_mvm_add_sta_cmd cmd = {
3689 .add_modify = STA_MODE_MODIFY,
3690 .sta_id = mvmsta->sta_id,
3691 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3692 .sleep_tx_count = cpu_to_le16(cnt),
3693 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3694 };
3695 int tid, ret;
3696 unsigned long _tids = tids;
3697
 /* convert TIDs to ACs - we don't support TSPEC so that's OK */
3702 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3703 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3704
 /*
 * If frames are released from a single station queue, check how many
 * frames the queues of the released TIDs hold in total:
 *  - more frames than the service period means more_data must be set
 *  - fewer than 'cnt' frames means the count in the firmware command
 *    has to be adjusted accordingly
 */
3712 if (single_sta_queue) {
3713 int remaining = cnt;
3714 int sleep_tx_count;
3715
3716 spin_lock_bh(&mvmsta->lock);
3717 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3718 struct iwl_mvm_tid_data *tid_data;
3719 u16 n_queued;
3720
3721 tid_data = &mvmsta->tid_data[tid];
3722
3723 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3724 if (n_queued > remaining) {
3725 more_data = true;
3726 remaining = 0;
3727 break;
3728 }
3729 remaining -= n_queued;
3730 }
3731 sleep_tx_count = cnt - remaining;
3732 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3733 mvmsta->sleep_tx_count = sleep_tx_count;
3734 spin_unlock_bh(&mvmsta->lock);
3735
3736 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3737 if (WARN_ON(cnt - remaining == 0)) {
3738 ieee80211_sta_eosp(sta);
3739 return;
3740 }
3741 }
3742
3743
3744 if (more_data)
3745 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3746
3747 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
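 /* have the TX-status path signal EOSP to mac80211 after this release */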
3748 mvmsta->next_status_eosp = true;
3749 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3750 } else {
3751 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3752 }
3753
 /* block the Tx queues until the FW has updated the sleep Tx count */
3755 iwl_trans_block_txq_ptrs(mvm->trans, true);
3756
3757 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3758 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3759 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3760 if (ret)
3761 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3762}
3763
3764void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3765 struct iwl_rx_cmd_buffer *rxb)
3766{
3767 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3768 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3769 struct ieee80211_sta *sta;
3770 u32 sta_id = le32_to_cpu(notif->sta_id);
3771
3772 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
3773 return;
3774
3775 rcu_read_lock();
3776 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3777 if (!IS_ERR_OR_NULL(sta))
3778 ieee80211_sta_eosp(sta);
3779 rcu_read_unlock();
3780}
3781
3782void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3783 struct iwl_mvm_sta *mvmsta, bool disable)
3784{
3785 struct iwl_mvm_add_sta_cmd cmd = {
3786 .add_modify = STA_MODE_MODIFY,
3787 .sta_id = mvmsta->sta_id,
3788 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3789 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3790 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3791 };
3792 int ret;
3793
3794 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3795 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3796 if (ret)
3797 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3798}
3799
3800void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3801 struct ieee80211_sta *sta,
3802 bool disable)
3803{
3804 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3805
3806 spin_lock_bh(&mvm_sta->lock);
3807
3808 if (mvm_sta->disable_tx == disable) {
3809 spin_unlock_bh(&mvm_sta->lock);
3810 return;
3811 }
3812
3813 mvm_sta->disable_tx = disable;
3814
 /* tell mac80211 to start/stop queuing tx for this station */
3816 ieee80211_sta_block_awake(mvm->hw, sta, disable);
3817
3818 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3819
3820 spin_unlock_bh(&mvm_sta->lock);
3821}
3822
3823static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3824 struct iwl_mvm_vif *mvmvif,
3825 struct iwl_mvm_int_sta *sta,
3826 bool disable)
3827{
3828 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3829 struct iwl_mvm_add_sta_cmd cmd = {
3830 .add_modify = STA_MODE_MODIFY,
3831 .sta_id = sta->sta_id,
3832 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3833 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3834 .mac_id_n_color = cpu_to_le32(id),
3835 };
3836 int ret;
3837
3838 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
3839 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3840 if (ret)
3841 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3842}
3843
3844void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3845 struct iwl_mvm_vif *mvmvif,
3846 bool disable)
3847{
3848 struct ieee80211_sta *sta;
3849 struct iwl_mvm_sta *mvm_sta;
3850 int i;
3851
3852 lockdep_assert_held(&mvm->mutex);
3853
 /* block/unblock all the stations of the given mvmvif */
3855 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
3856 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3857 lockdep_is_held(&mvm->mutex));
3858 if (IS_ERR_OR_NULL(sta))
3859 continue;
3860
3861 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3862 if (mvm_sta->mac_id_n_color !=
3863 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3864 continue;
3865
3866 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3867 }
3868
3869 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3870 return;
3871
3872
3873 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3874 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3875 &mvmvif->mcast_sta, disable);
3876
 /*
 * Only the broadcast station is unblocked here; blocking it is left
 * to the firmware.
 */
3881 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3882 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3883 &mvmvif->bcast_sta, disable);
3884}
3885
3886void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3887{
3888 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3889 struct iwl_mvm_sta *mvmsta;
3890
3891 rcu_read_lock();
3892
3893 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3894
3895 if (!WARN_ON(!mvmsta))
3896 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3897
3898 rcu_read_unlock();
3899}
3900
3901u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3902{
3903 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3904
 /*
 * On gen2 hardware the next_reclaimed index is only 8 bits wide, so
 * truncate the SSN the same way before comparing the two.
 */
3909 if (mvm->trans->trans_cfg->gen2)
3910 sn &= 0xff;
3911
3912 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
3913}
3914