#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
                                u32 sta_id,
                                struct ieee80211_key_conf *key, bool mcast,
                                u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
                                u8 key_offset, bool mfp);
/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough
 * to support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
    if (iwl_mvm_has_new_rx_api(mvm) ||
        fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
        return sizeof(struct iwl_mvm_add_sta_cmd);
    else
        return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

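/*
 * Find the first unused station id, skipping any reserved ids
 * (sta_id 0 is kept for the AP station on non-station interfaces).
 */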
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
{
    int sta_id;
    u32 reserved_ids = 0;

    BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
    WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

    lockdep_assert_held(&mvm->mutex);

    /* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0 - reserve it */
    if (iftype != NL80211_IFTYPE_STATION)
        reserved_ids = BIT(0);

    /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
    for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
        if (BIT(sta_id) & reserved_ids)
            continue;

        if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                       lockdep_is_held(&mvm->mutex)))
            return sta_id;
    }
    return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update, unsigned int flags)
{
    struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
    struct iwl_mvm_add_sta_cmd add_sta_cmd = {
        .sta_id = mvm_sta->sta_id,
        .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
        .add_modify = update ? 1 : 0,
        .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                         STA_FLG_MIMO_EN_MSK |
                                         STA_FLG_RTS_MIMO_PROT),
        .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
    };
    int ret;
    u32 status;
    u32 agg_size = 0, mpdu_dens = 0;

    if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
        add_sta_cmd.station_type = mvm_sta->sta_type;

    if (!update || (flags & STA_MODIFY_QUEUES)) {
        memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

        if (!iwl_mvm_has_new_tx_api(mvm)) {
            add_sta_cmd.tfd_queue_msk =
                cpu_to_le32(mvm_sta->tfd_queue_msk);

            if (flags & STA_MODIFY_QUEUES)
                add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
        } else {
            WARN_ON(flags & STA_MODIFY_QUEUES);
        }
    }

    switch (sta->bandwidth) {
    case IEEE80211_STA_RX_BW_160:
        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
        /* fall through */
    case IEEE80211_STA_RX_BW_80:
        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
        /* fall through */
    case IEEE80211_STA_RX_BW_40:
        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
        /* fall through */
    case IEEE80211_STA_RX_BW_20:
        if (sta->ht_cap.ht_supported)
            add_sta_cmd.station_flags |=
                cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
        break;
    }

    switch (sta->rx_nss) {
    case 1:
        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
        break;
    case 2:
        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
        break;
    case 3 ... 8:
        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
        break;
    }

    switch (sta->smps_mode) {
    case IEEE80211_SMPS_AUTOMATIC:
    case IEEE80211_SMPS_NUM_MODES:
        WARN_ON(1);
        break;
    case IEEE80211_SMPS_STATIC:
        /* override NSS */
        add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
        break;
    case IEEE80211_SMPS_DYNAMIC:
        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
        break;
    case IEEE80211_SMPS_OFF:
        /* nothing */
        break;
    }

    if (sta->ht_cap.ht_supported) {
        add_sta_cmd.station_flags_msk |=
            cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                        STA_FLG_AGG_MPDU_DENS_MSK);

        mpdu_dens = sta->ht_cap.ampdu_density;
    }

    if (sta->vht_cap.vht_supported) {
        agg_size = sta->vht_cap.cap &
            IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
        agg_size >>=
            IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
    } else if (sta->ht_cap.ht_supported) {
        agg_size = sta->ht_cap.ampdu_factor;
    }

    add_sta_cmd.station_flags |=
        cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
    add_sta_cmd.station_flags |=
        cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
    if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
        add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

    if (sta->wme) {
        add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
            add_sta_cmd.uapsd_acs |= BIT(AC_BK);
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
            add_sta_cmd.uapsd_acs |= BIT(AC_BE);
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
            add_sta_cmd.uapsd_acs |= BIT(AC_VI);
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
            add_sta_cmd.uapsd_acs |= BIT(AC_VO);
        add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
        add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
    }

    status = ADD_STA_SUCCESS;
    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                      iwl_mvm_add_sta_cmd_size(mvm),
                                      &add_sta_cmd, &status);
    if (ret)
        return ret;

    switch (status & IWL_ADD_STA_STATUS_MASK) {
    case ADD_STA_SUCCESS:
        IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
        break;
    default:
        ret = -EIO;
        IWL_ERR(mvm, "ADD_STA failed\n");
        break;
    }

    return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
    struct iwl_mvm_baid_data *data =
        from_timer(data, t, session_timer);
    struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
    struct iwl_mvm_baid_data *ba_data;
    struct ieee80211_sta *sta;
    struct iwl_mvm_sta *mvm_sta;
    unsigned long timeout;

    rcu_read_lock();

    ba_data = rcu_dereference(*rcu_ptr);

    if (WARN_ON(!ba_data))
        goto unlock;

    if (!ba_data->timeout)
        goto unlock;

    timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
    if (time_is_after_jiffies(timeout)) {
        mod_timer(&ba_data->session_timer, timeout);
        goto unlock;
    }

    /* Timer expired */
    sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

    /*
     * sta should be valid unless the following happens:
     * The firmware asserts which triggers a reconfig flow, but
     * the reconfig fails before we set the pointer to sta into
     * the fw_id_to_mac_id pointer table. mac80211 can't stop
     * A-MPDU and hence the timer continues to run. Then, the
     * timer expires and sta is NULL.
     */
    if (!sta)
        goto unlock;

    mvm_sta = iwl_mvm_sta_from_mac80211(sta);
    ieee80211_rx_ba_timer_expired(mvm_sta->vif,
                                  sta->addr, ba_data->tid);
unlock:
    rcu_read_unlock();
}

static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
                                        bool remove_queue)
{
    struct iwl_mvm_add_sta_cmd cmd = {};
    struct ieee80211_sta *sta;
    struct iwl_mvm_sta *mvmsta;
    u32 status;
    u8 sta_id;

    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return -EINVAL;

    sta_id = mvm->queue_info[queue].ra_sta_id;

    rcu_read_lock();

    sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

    if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
        rcu_read_unlock();
        return -EINVAL;
    }

    mvmsta = iwl_mvm_sta_from_mac80211(sta);

    mvmsta->tid_disable_agg |= disable_agg_tids;

    cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
    cmd.sta_id = mvmsta->sta_id;
    cmd.add_modify = STA_MODE_MODIFY;
    cmd.modify_mask = STA_MODIFY_QUEUES;
    if (disable_agg_tids)
        cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
    if (remove_queue)
        cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
    cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
    cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

    rcu_read_unlock();

    /* Notify FW of queue removal from the STA queues */
    status = ADD_STA_SUCCESS;
    return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                       iwl_mvm_add_sta_cmd_size(mvm),
                                       &cmd, &status);
}

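/*
 * Disable a TXQ: remove the given tid from the queue's tid bitmap and, once
 * no tids are left mapped to it, tear the queue down in the transport and
 * notify the firmware via SCD_QUEUE_CFG.
 */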
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                               int queue, u8 tid, u8 flags)
{
    struct iwl_scd_txq_cfg_cmd cmd = {
        .scd_queue = queue,
        .action = SCD_CFG_DISABLE_QUEUE,
    };
    int ret;

    if (iwl_mvm_has_new_tx_api(mvm)) {
        iwl_trans_txq_free(mvm->trans, queue);

        return 0;
    }

    if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
        return 0;

    mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

    cmd.action = mvm->queue_info[queue].tid_bitmap ?
        SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
    if (cmd.action == SCD_CFG_DISABLE_QUEUE)
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

    IWL_DEBUG_TX_QUEUES(mvm,
                        "Disabling TXQ #%d tids=0x%x\n",
                        queue,
                        mvm->queue_info[queue].tid_bitmap);

    /* If the queue is still enabled - nothing left to do in this func */
    if (cmd.action == SCD_CFG_ENABLE_QUEUE)
        return 0;

    cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
    cmd.tid = mvm->queue_info[queue].txq_tid;

    /* Make sure queue info is correct even though we overwrite it */
    WARN(mvm->queue_info[queue].tid_bitmap,
         "TXQ #%d info out-of-sync - tids=0x%x\n",
         queue, mvm->queue_info[queue].tid_bitmap);

    /* If we are here - the queue is freed and we can zero out these vals */
    mvm->queue_info[queue].tid_bitmap = 0;

    if (sta) {
        struct iwl_mvm_txq *mvmtxq =
            iwl_mvm_txq_from_tid(sta, tid);

        mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
    }

    /* Regardless if this is a reserved TXQ for a STA - mark it as false */
    mvm->queue_info[queue].reserved = false;

    iwl_trans_txq_disable(mvm->trans, queue, false);
    ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
                               sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

    if (ret)
        IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
                queue, ret);
    return ret;
}

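/* Returns a bitmap of the tids on this queue that have an open aggregation */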
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
    struct ieee80211_sta *sta;
    struct iwl_mvm_sta *mvmsta;
    unsigned long tid_bitmap;
    unsigned long agg_tids = 0;
    u8 sta_id;
    int tid;

    lockdep_assert_held(&mvm->mutex);

    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return -EINVAL;

    sta_id = mvm->queue_info[queue].ra_sta_id;
    tid_bitmap = mvm->queue_info[queue].tid_bitmap;

    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                    lockdep_is_held(&mvm->mutex));

    if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
        return -EINVAL;

    mvmsta = iwl_mvm_sta_from_mac80211(sta);

    spin_lock_bh(&mvmsta->lock);
    for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
        if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
            agg_tids |= BIT(tid);
    }
    spin_unlock_bh(&mvmsta->lock);

    return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
    struct ieee80211_sta *sta;
    struct iwl_mvm_sta *mvmsta;
    unsigned long tid_bitmap;
    unsigned long disable_agg_tids = 0;
    u8 sta_id;
    int tid;

    lockdep_assert_held(&mvm->mutex);

    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return -EINVAL;

    sta_id = mvm->queue_info[queue].ra_sta_id;
    tid_bitmap = mvm->queue_info[queue].tid_bitmap;

    rcu_read_lock();

    sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

    if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
        rcu_read_unlock();
        return 0;
    }

    mvmsta = iwl_mvm_sta_from_mac80211(sta);

    spin_lock_bh(&mvmsta->lock);
    /* Unmap MAC queues and TIDs from this queue */
    for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
        struct iwl_mvm_txq *mvmtxq =
            iwl_mvm_txq_from_tid(sta, tid);

        if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
            disable_agg_tids |= BIT(tid);
        mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

        mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
    }

    mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
    spin_unlock_bh(&mvmsta->lock);

    rcu_read_unlock();

    /*
     * The TX path may have been using this TXQ_ID from the tid_data,
     * so make sure it's no longer running so that we can safely reuse
     * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
     * above, but nothing guarantees we've stopped using them. Thus,
     * without this, we could get to iwl_mvm_disable_txq() and remove
     * the queue while still sending frames to it.
     */
    synchronize_net();

    return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
                                       struct ieee80211_sta *old_sta,
                                       u8 new_sta_id)
{
    struct iwl_mvm_sta *mvmsta;
    u8 sta_id, tid;
    unsigned long disable_agg_tids = 0;
    bool same_sta;
    int ret;

    lockdep_assert_held(&mvm->mutex);

    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return -EINVAL;

    sta_id = mvm->queue_info[queue].ra_sta_id;
    tid = mvm->queue_info[queue].txq_tid;

    same_sta = sta_id == new_sta_id;

    mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
    if (WARN_ON(!mvmsta))
        return -EINVAL;

    disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
    /* Disable the queue */
    if (disable_agg_tids)
        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                     disable_agg_tids, false);

    ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
    if (ret) {
        IWL_ERR(mvm,
                "Failed to free inactive queue %d (ret=%d)\n",
                queue, ret);

        return ret;
    }

    /* If TXQ is allocated to another STA, update removal in FW */
    if (!same_sta)
        iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

    return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                                    unsigned long tfd_queue_mask, u8 ac)
{
    int queue = 0;
    u8 ac_to_queue[IEEE80211_NUM_ACS];
    int i;

    /*
     * This protects us against grabbing a queue that's being reconfigured
     * by the inactivity checker.
     */
    lockdep_assert_held(&mvm->mutex);

    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return -EINVAL;

    memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

    /* See what ACs the existing queues have */
    for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
        /* Only DATA queues can be shared */
        if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
            i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
            continue;

        ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
    }

    /*
     * The queue to share is chosen only from DATA queues as follows (in
     * descending priority):
     * 1. An AC_BE queue
     * 2. Same AC queue
     * 3. Highest AC queue that is lower than new AC
     * 4. Any existing AC (there always is at least 1 DATA queue)
     */

    /* Priority 1: An AC_BE queue */
    if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
        queue = ac_to_queue[IEEE80211_AC_BE];
    /* Priority 2: Same AC queue */
    else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
        queue = ac_to_queue[ac];
    /* Priority 3a: If new AC is VO and VI exists - use VI */
    else if (ac == IEEE80211_AC_VO &&
             ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
        queue = ac_to_queue[IEEE80211_AC_VI];
    /* Priority 3b: No BE, so the only AC less than the new one is BK */
    else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
        queue = ac_to_queue[IEEE80211_AC_BK];
    /* Priority 4a: No BE nor BK - use VI if exists */
    else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
        queue = ac_to_queue[IEEE80211_AC_VI];
    /* Priority 4b: No BE, BK nor VI - use VO if exists */
    else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
        queue = ac_to_queue[IEEE80211_AC_VO];

    /* Make sure queue found (or not) is legal */
    if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
        !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
        (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
        IWL_ERR(mvm, "No DATA queues available to share\n");
        return -ENOSPC;
    }

    return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection required - it does
 * nothing, except changing the queue's owner.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
                                  int ac, int ssn, unsigned int wdg_timeout,
                                  bool force, struct iwl_mvm_txq *txq)
{
    struct iwl_scd_txq_cfg_cmd cmd = {
        .scd_queue = queue,
        .action = SCD_CFG_DISABLE_QUEUE,
    };
    bool shared_queue;
    int ret;

    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return -EINVAL;

    /*
     * If the AC is lower than current one - FIFO needs to be redirected to
     * the lowest one of the streams in the queue. Check if this is needed
     * here. Note that the AC enum values are "inverted" with respect to
     * priority: AC_VO is 0 (highest priority) and AC_BK is 3 (lowest), so
     * a numerically larger AC means a lower-priority stream.
     */
    if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
        IWL_DEBUG_TX_QUEUES(mvm,
                            "No redirection needed on TXQ #%d\n",
                            queue);
        return 0;
    }

    cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
    cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
    cmd.tid = mvm->queue_info[queue].txq_tid;
    shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

    IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                        queue, iwl_mvm_ac_to_tx_fifo[ac]);

    /* Stop the queue and wait for it to empty */
    txq->stopped = true;

    ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
    if (ret) {
        IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
                queue);
        ret = -EIO;
        goto out;
    }

    /* Before redirecting the queue we need to de-activate it */
    iwl_trans_txq_disable(mvm->trans, queue, false);
    ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
    if (ret)
        IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
                ret);

    /* Make sure the SCD wrptr is correctly set before reconfiguring */
    iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

    /* Update the TID "owner" of the queue */
    mvm->queue_info[queue].txq_tid = tid;

    /* Redirect to lower AC */
    iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
                         cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

    /* Update AC marking of the queue */
    mvm->queue_info[queue].mac80211_ac = ac;

    /*
     * Mark queue as shared in transport if shared.
     * Note this has to be done after queue enablement because enablement
     * can also set this value, and there is no indication there to shared
     * queues.
     */
    if (shared_queue)
        iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
    /* Continue using the queue */
    txq->stopped = false;

    return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
                                   u8 minq, u8 maxq)
{
    int i;

    lockdep_assert_held(&mvm->mutex);

    /* This should not be hit with new TX path */
    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return -ENOSPC;

    /* Start by looking for a free queue */
    for (i = minq; i <= maxq; i++)
        if (mvm->queue_info[i].tid_bitmap == 0 &&
            mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
            return i;

    return -ENOSPC;
}

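/*
 * Allocate a queue in the TVQM (new TX path) for the given station/tid.
 * Management frames (tid == IWL_MAX_TID_COUNT) get the firmware's
 * management tid and a (possibly) smaller queue size.
 */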
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
                                   u8 sta_id, u8 tid, unsigned int timeout)
{
    int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
                            mvm->trans->cfg->min_256_ba_txq_size);

    if (tid == IWL_MAX_TID_COUNT) {
        tid = IWL_MGMT_TID;
        size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
                     mvm->trans->cfg->min_txq_size);
    }
    queue = iwl_trans_txq_alloc(mvm->trans,
                                cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
                                sta_id, tid, SCD_QUEUE_CFG, size, timeout);

    if (queue < 0) {
        IWL_DEBUG_TX_QUEUES(mvm,
                            "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
                            sta_id, tid, queue);
        return queue;
    }

    IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
                        queue, sta_id, tid);

    IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);

    return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
                                        struct ieee80211_sta *sta, u8 ac,
                                        int tid)
{
    struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
    struct iwl_mvm_txq *mvmtxq =
        iwl_mvm_txq_from_tid(sta, tid);
    unsigned int wdg_timeout =
        iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
    int queue = -1;

    lockdep_assert_held(&mvm->mutex);

    IWL_DEBUG_TX_QUEUES(mvm,
                        "Allocating queue for sta %d on tid %d\n",
                        mvmsta->sta_id, tid);
    queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
    if (queue < 0)
        return queue;

    mvmtxq->txq_id = queue;
    mvm->tvqm_info[queue].txq_tid = tid;
    mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

    IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

    spin_lock_bh(&mvmsta->lock);
    mvmsta->tid_data[tid].txq_id = queue;
    spin_unlock_bh(&mvmsta->lock);

    return 0;
}

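/*
 * Update the driver's bookkeeping when mapping a tid to a queue. Returns
 * true if the queue was previously unused and therefore still needs to be
 * enabled in the hardware, false if it was already in use (shared queue).
 */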
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       int queue, u8 sta_id, u8 tid)
{
    bool enable_queue = true;

    /* Make sure this TID isn't already enabled */
    if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
        IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
                queue, tid);
        return false;
    }

    /* Update mappings and refcounts */
    if (mvm->queue_info[queue].tid_bitmap)
        enable_queue = false;

    mvm->queue_info[queue].tid_bitmap |= BIT(tid);
    mvm->queue_info[queue].ra_sta_id = sta_id;

    if (enable_queue) {
        if (tid != IWL_MAX_TID_COUNT)
            mvm->queue_info[queue].mac80211_ac =
                tid_to_mac80211_ac[tid];
        else
            mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

        mvm->queue_info[queue].txq_tid = tid;
    }

    if (sta) {
        struct iwl_mvm_txq *mvmtxq =
            iwl_mvm_txq_from_tid(sta, tid);

        mvmtxq->txq_id = queue;
    }

    IWL_DEBUG_TX_QUEUES(mvm,
                        "Enabling TXQ #%d tids=0x%x\n",
                        queue, mvm->queue_info[queue].tid_bitmap);

    return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                               int queue, u16 ssn,
                               const struct iwl_trans_txq_scd_cfg *cfg,
                               unsigned int wdg_timeout)
{
    struct iwl_scd_txq_cfg_cmd cmd = {
        .scd_queue = queue,
        .action = SCD_CFG_ENABLE_QUEUE,
        .window = cfg->frame_limit,
        .sta_id = cfg->sta_id,
        .ssn = cpu_to_le16(ssn),
        .tx_fifo = cfg->fifo,
        .aggregate = cfg->aggregate,
        .tid = cfg->tid,
    };
    bool inc_ssn;

    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return false;

    /* Send the enabling command if we need to */
    if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
        return false;

    inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
                                       NULL, wdg_timeout);
    if (inc_ssn)
        le16_add_cpu(&cmd.ssn, 1);

    WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
         "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

    return inc_ssn;
}

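/*
 * Hand a shared queue's ownership to one of its remaining tids by sending
 * an SCD_QUEUE_CFG update with the new owner tid and its matching FIFO.
 */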
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
    struct iwl_scd_txq_cfg_cmd cmd = {
        .scd_queue = queue,
        .action = SCD_CFG_UPDATE_QUEUE_TID,
    };
    int tid;
    unsigned long tid_bitmap;
    int ret;

    lockdep_assert_held(&mvm->mutex);

    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return;

    tid_bitmap = mvm->queue_info[queue].tid_bitmap;

    if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
        return;

    /* Find any TID for queue */
    tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
    cmd.tid = tid;
    cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

    ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
    if (ret) {
        IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
                queue, ret);
        return;
    }

    mvm->queue_info[queue].txq_tid = tid;
    IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                        queue, tid);
}

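/*
 * Turn a queue that was shared by several tids back into a regular,
 * single-tid queue: redirect it to the tid's AC and re-enable aggregation
 * if the remaining tid had an open BA session.
 */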
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
    struct ieee80211_sta *sta;
    struct iwl_mvm_sta *mvmsta;
    u8 sta_id;
    int tid = -1;
    unsigned long tid_bitmap;
    unsigned int wdg_timeout;
    int ssn;
    int ret = true;

    /* queue sharing is disabled on new TX path */
    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return;

    lockdep_assert_held(&mvm->mutex);

    sta_id = mvm->queue_info[queue].ra_sta_id;
    tid_bitmap = mvm->queue_info[queue].tid_bitmap;

    /* Find TID for queue, and make sure it is the only one on the queue */
    tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
    if (tid_bitmap != BIT(tid)) {
        IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
                queue, tid_bitmap);
        return;
    }

    IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
                        tid);

    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                    lockdep_is_held(&mvm->mutex));

    if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
        return;

    mvmsta = iwl_mvm_sta_from_mac80211(sta);
    wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

    ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

    ret = iwl_mvm_redirect_queue(mvm, queue, tid,
                                 tid_to_mac80211_ac[tid], ssn,
                                 wdg_timeout, true,
                                 iwl_mvm_txq_from_tid(sta, tid));
    if (ret) {
        IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
        return;
    }

    /* If aggs should be turned back on - do it */
    if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
        struct iwl_mvm_add_sta_cmd cmd = {0};

        mvmsta->tid_disable_agg &= ~BIT(tid);

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (!ret) {
            IWL_DEBUG_TX_QUEUES(mvm,
                                "TXQ #%d is now aggregated again\n",
                                queue);

            /* Mark queue internally as aggregating again */
            iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
        }
    }

    mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive.
 * If only some of the queue TIDs are inactive - unmap them from the queue.
 *
 * Returns %true if all TIDs were removed and the queue could possibly be
 * reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
                                         struct iwl_mvm_sta *mvmsta, int queue,
                                         unsigned long tid_bitmap,
                                         unsigned long *unshare_queues,
                                         unsigned long *changetid_queues)
{
    int tid;

    lockdep_assert_held(&mvmsta->lock);
    lockdep_assert_held(&mvm->mutex);

    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return false;

    /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
    for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
        /* If some TFDs are still queued - don't mark TID as inactive */
        if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
            tid_bitmap &= ~BIT(tid);

        /* Don't mark as inactive any TID that has an active BA */
        if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
            tid_bitmap &= ~BIT(tid);
    }

    /* If all TIDs in the queue are inactive - return it can be reused */
    if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
        IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
        return true;
    }

    /*
     * If we are here, this is a shared queue and not all TIDs timed-out.
     * Remove the ones that did.
     */
    for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
        u16 tid_bitmap;

        mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
        mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        /*
         * We need to take into account a situation in which a TXQ was
         * allocated to TID x, and then turned shared by adding TIDs y
         * and z. If TID x becomes inactive and is removed from the TXQ,
         * ownership must be given to one of the remaining TIDs.
         * This is mainly because if TID x continues - a new queue can't
         * be allocated for it as long as it is an owner of another TXQ.
         *
         * Mark this queue in the right bitmap, we'll send the command
         * to the firmware later.
         */
        if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
            set_bit(queue, changetid_queues);

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Removing inactive TID %d from shared Q:%d\n",
                            tid, queue);
    }

    IWL_DEBUG_TX_QUEUES(mvm,
                        "TXQ #%d left with tid bitmap 0x%x\n", queue,
                        mvm->queue_info[queue].tid_bitmap);

    /*
     * There may be different TIDs with the same mac queues, so make
     * sure all TIDs have existing corresponding mac queues enabled
     */
    tid_bitmap = mvm->queue_info[queue].tid_bitmap;

    /* If the queue is marked as shared - "unshare" it */
    if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
        mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
        IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
                            queue);
        set_bit(queue, unshare_queues);
    }

    return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding new owners for the rest,
 * and if any queue is completely inactive - free it.
 *
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
    unsigned long now = jiffies;
    unsigned long unshare_queues = 0;
    unsigned long changetid_queues = 0;
    int i, ret, free_queue = -ENOSPC;
    struct ieee80211_sta *queue_owner = NULL;

    lockdep_assert_held(&mvm->mutex);

    if (iwl_mvm_has_new_tx_api(mvm))
        return -ENOSPC;

    rcu_read_lock();

    /* we skip the CMD queue below by starting at 1 */
    BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

    for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u8 sta_id;
        int tid;
        unsigned long inactive_tid_bitmap = 0;
        unsigned long queue_tid_bitmap;

        queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
        if (!queue_tid_bitmap)
            continue;

        /* If TXQ isn't in active use anyway - nothing to do here... */
        if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
            mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
            continue;

        /* Check to see if there are inactive TIDs on this queue */
        for_each_set_bit(tid, &queue_tid_bitmap,
                         IWL_MAX_TID_COUNT + 1) {
            if (time_after(mvm->queue_info[i].last_frame_time[tid] +
                           IWL_MVM_DQA_QUEUE_TIMEOUT, now))
                continue;

            inactive_tid_bitmap |= BIT(tid);
        }

        /* If all TIDs are active - finish check on this queue */
        if (!inactive_tid_bitmap)
            continue;

        /*
         * If we are here - the queue hadn't been served recently and
         * is in use
         */

        sta_id = mvm->queue_info[i].ra_sta_id;
        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        /*
         * If the STA doesn't exist anymore, it isn't an error. It
         * could be that it was removed since getting the queues, and
         * in this case it should've inactivated its queues anyway.
         */
        if (IS_ERR_OR_NULL(sta))
            continue;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
                                           inactive_tid_bitmap,
                                           &unshare_queues,
                                           &changetid_queues);
        if (ret >= 0 && free_queue < 0) {
            queue_owner = sta;
            free_queue = ret;
        }
        /* only unlock sta lock - we still need the queue info lock */
        spin_unlock_bh(&mvmsta->lock);
    }

    /* Reconfigure queues requiring reconfiguration */
    for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
        iwl_mvm_unshare_queue(mvm, i);
    for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
        iwl_mvm_change_queue_tid(mvm, i);

    if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
        ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
                                          alloc_for_sta);
        if (ret) {
            rcu_read_unlock();
            return ret;
        }
    }

    rcu_read_unlock();

    return free_queue;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid)
{
    struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
    struct iwl_trans_txq_scd_cfg cfg = {
        .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
        .sta_id = mvmsta->sta_id,
        .tid = tid,
        .frame_limit = IWL_FRAME_LIMIT,
    };
    unsigned int wdg_timeout =
        iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
    int queue = -1;
    unsigned long disable_agg_tids = 0;
    enum iwl_mvm_agg_state queue_state;
    bool shared_queue = false, inc_ssn;
    int ssn;
    unsigned long tfd_queue_mask;
    int ret;

    lockdep_assert_held(&mvm->mutex);

    if (iwl_mvm_has_new_tx_api(mvm))
        return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

    spin_lock_bh(&mvmsta->lock);
    tfd_queue_mask = mvmsta->tfd_queue_msk;
    ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
    spin_unlock_bh(&mvmsta->lock);

    if (tid == IWL_MAX_TID_COUNT) {
        queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                        IWL_MVM_DQA_MIN_MGMT_QUEUE,
                                        IWL_MVM_DQA_MAX_MGMT_QUEUE);
        if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
            IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
                                queue);

        /* If no such queue is found, we'll use a DATA queue instead */
    }

    if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
        (mvm->queue_info[mvmsta->reserved_queue].status ==
         IWL_MVM_QUEUE_RESERVED)) {
        queue = mvmsta->reserved_queue;
        mvm->queue_info[queue].reserved = true;
        IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
    }

    if (queue < 0)
        queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                        IWL_MVM_DQA_MIN_DATA_QUEUE,
                                        IWL_MVM_DQA_MAX_DATA_QUEUE);
    if (queue < 0) {
        /* try harder - perhaps kill an inactive queue */
        queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
    }

    /* No free queue - we'll have to share */
    if (queue <= 0) {
        queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
        if (queue > 0) {
            shared_queue = true;
            mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
        }
    }

    /*
     * Mark TXQ as ready, even though it hasn't been fully configured yet,
     * to make sure no one else takes it.
     * This will allow avoiding re-acquiring the lock at the end of the
     * configuration. On error we'll mark it back as free.
     */
    if (queue > 0 && !shared_queue)
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

    /* This shouldn't happen - out of queues */
    if (WARN_ON(queue <= 0)) {
        IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
                tid, cfg.sta_id);
        return queue;
    }

    /*
     * Actual en/disablement of aggregations is through the ADD_STA HCMD,
     * but for configuring the SCD to send A-MPDUs we need to mark the
     * queue as aggregatable.
     * Mark all DATA queues as allowing to be aggregated at some point.
     */
    cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                     queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

    IWL_DEBUG_TX_QUEUES(mvm,
                        "Allocating %squeue #%d to sta %d on tid %d\n",
                        shared_queue ? "shared " : "", queue,
                        mvmsta->sta_id, tid);

    if (shared_queue) {
        /* Disable any open aggs on this queue */
        disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

        if (disable_agg_tids) {
            IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
                                queue);
            iwl_mvm_invalidate_sta_queue(mvm, queue,
                                         disable_agg_tids, false);
        }
    }

    inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

    /*
     * Mark queue as shared in transport if shared.
     * Note this has to be done after queue enablement because enablement
     * can also set this value, and there is no indication there to shared
     * queues.
     */
    if (shared_queue)
        iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

    spin_lock_bh(&mvmsta->lock);
    /*
     * This looks racy, but it is not. We have only one packet for
     * this ra/tid in our Tx path since we stop the Qdisc when we
     * need to allocate a new TFD queue.
     */
    if (inc_ssn) {
        mvmsta->tid_data[tid].seq_number += 0x10;
        ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
    }
    mvmsta->tid_data[tid].txq_id = queue;
    mvmsta->tfd_queue_msk |= BIT(queue);
    queue_state = mvmsta->tid_data[tid].state;

    if (mvmsta->reserved_queue == queue)
        mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
    spin_unlock_bh(&mvmsta->lock);

    if (!shared_queue) {
        ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
        if (ret)
            goto out_err;

        /* If we need to re-enable aggregations... */
        if (queue_state == IWL_AGG_ON) {
            ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
            if (ret)
                goto out_err;
        }
    } else {
        /* Redirect queue, if needed */
        ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
                                     wdg_timeout, false,
                                     iwl_mvm_txq_from_tid(sta, tid));
        if (ret)
            goto out_err;
    }

    return 0;

out_err:
    iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);

    return ret;
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
    if (tid == IWL_MAX_TID_COUNT)
        return IEEE80211_AC_VO;

    return tid_to_mac80211_ac[tid];
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
    struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
                                       add_stream_wk);

    mutex_lock(&mvm->mutex);

    iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

    while (!list_empty(&mvm->add_stream_txqs)) {
        struct iwl_mvm_txq *mvmtxq;
        struct ieee80211_txq *txq;
        u8 tid;

        mvmtxq = list_first_entry(&mvm->add_stream_txqs,
                                  struct iwl_mvm_txq, list);

        txq = container_of((void *)mvmtxq, struct ieee80211_txq,
                           drv_priv);
        tid = txq->tid;
        if (tid == IEEE80211_NUM_TIDS)
            tid = IWL_MAX_TID_COUNT;

        iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
        list_del_init(&mvmtxq->list);
        local_bh_disable();
        iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
        local_bh_enable();
    }

    mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
                                      struct ieee80211_sta *sta,
                                      enum nl80211_iftype vif_type)
{
    struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
    int queue;

    /* queue reserving is disabled on new TX path */
    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
        return 0;

    /* run the general cleanup/unsharing of queues */
    iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

    /* Make sure we have free resources for this STA */
    if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
        !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
        (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
         IWL_MVM_QUEUE_FREE))
        queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
    else
        queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                        IWL_MVM_DQA_MIN_DATA_QUEUE,
                                        IWL_MVM_DQA_MAX_DATA_QUEUE);
    if (queue < 0) {
        /* try again - this time kick out a queue if needed */
        queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
        if (queue < 0) {
            IWL_ERR(mvm, "No available queues for new station\n");
            return -ENOSPC;
        }
    }
    mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

    mvmsta->reserved_queue = queue;

    IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
                        queue, mvmsta->sta_id);

    return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before,
 * in order to avoid race conditions when there are shared queues. This
 * function does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                                 struct ieee80211_sta *sta)
{
    struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
    unsigned int wdg =
        iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
    int i;
    struct iwl_trans_txq_scd_cfg cfg = {
        .sta_id = mvm_sta->sta_id,
        .frame_limit = IWL_FRAME_LIMIT,
    };

    /* Make sure reserved queue is still marked as such (if allocated) */
    if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
        mvm->queue_info[mvm_sta->reserved_queue].status =
            IWL_MVM_QUEUE_RESERVED;

    for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
        struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
        int txq_id = tid_data->txq_id;
        int ac;

        if (txq_id == IWL_MVM_INVALID_QUEUE)
            continue;

        ac = tid_to_mac80211_ac[i];

        if (iwl_mvm_has_new_tx_api(mvm)) {
            IWL_DEBUG_TX_QUEUES(mvm,
                                "Re-mapping sta %d tid %d\n",
                                mvm_sta->sta_id, i);
            txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
                                             i, wdg);
            tid_data->txq_id = txq_id;

            /*
             * Since we don't set the seq number after reset, and HW
             * sets it now, FW reset will cause the seq num to start
             * at 0 again, so the driver will need to update it
             * internally as well, so it keeps in sync with the real
             * value.
             */
            tid_data->seq_number = 0;
        } else {
            u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

            cfg.tid = i;
            cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
            cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                             txq_id ==
                             IWL_MVM_DQA_BSS_CLIENT_QUEUE);

            IWL_DEBUG_TX_QUEUES(mvm,
                                "Re-mapping sta %d tid %d to queue %d\n",
                                mvm_sta->sta_id, i, txq_id);

            iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
            mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
        }
    }
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
                                      struct iwl_mvm_int_sta *sta,
                                      const u8 *addr,
                                      u16 mac_id, u16 color)
{
    struct iwl_mvm_add_sta_cmd cmd;
    int ret;
    u32 status = ADD_STA_SUCCESS;

    lockdep_assert_held(&mvm->mutex);

    memset(&cmd, 0, sizeof(cmd));
    cmd.sta_id = sta->sta_id;
    cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
                                                         color));
    if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
        cmd.station_type = sta->type;

    if (!iwl_mvm_has_new_tx_api(mvm))
        cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
    cmd.tid_disable_tx = cpu_to_le16(0xffff);

    if (addr)
        memcpy(cmd.addr, addr, ETH_ALEN);

    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                      iwl_mvm_add_sta_cmd_size(mvm),
                                      &cmd, &status);
    if (ret)
        return ret;

    switch (status & IWL_ADD_STA_STATUS_MASK) {
    case ADD_STA_SUCCESS:
        IWL_DEBUG_INFO(mvm, "Internal station added.\n");
        return 0;
    default:
        ret = -EIO;
        IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
                status);
        break;
    }
    return ret;
}

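/*
 * Add a station to the firmware and the driver's tables: pick a free sta_id
 * (or reuse the old one on HW restart), initialize the per-tid data and TX
 * queues, and send the ADD_STA command.
 */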
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    struct ieee80211_sta *sta)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
    struct iwl_mvm_rxq_dup_data *dup_data;
    int i, ret, sta_id;
    bool sta_update = false;
    unsigned int sta_flags = 0;

    lockdep_assert_held(&mvm->mutex);

    if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
        sta_id = iwl_mvm_find_free_sta_id(mvm,
                                          ieee80211_vif_type_p2p(vif));
    else
        sta_id = mvm_sta->sta_id;

    if (sta_id == IWL_MVM_INVALID_STA)
        return -ENOSPC;

    spin_lock_init(&mvm_sta->lock);

    /* if this is a HW restart re-alloc existing queues */
    if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
        struct iwl_mvm_int_sta tmp_sta = {
            .sta_id = sta_id,
            .type = mvm_sta->sta_type,
        };

        /*
         * First add an empty station since allocating
         * a queue requires a valid station
         */
        ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
                                         mvmvif->id, mvmvif->color);
        if (ret)
            goto err;

        iwl_mvm_realloc_queues_after_restart(mvm, sta);
        sta_update = true;
        sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
        goto update_fw;
    }

    mvm_sta->sta_id = sta_id;
    mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                  mvmvif->color);
    mvm_sta->vif = vif;
    if (!mvm->trans->cfg->gen2)
        mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
    else
        mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
    mvm_sta->tx_protection = 0;
    mvm_sta->tt_tx_protection = false;
    mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

    /* HW restart, don't assume the memory has been zeroed */
    mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
    mvm_sta->tfd_queue_msk = 0;

    /* for HW restart - reset everything but the sequence number */
    for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
        u16 seq = mvm_sta->tid_data[i].seq_number;
        memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
        mvm_sta->tid_data[i].seq_number = seq;

        /*
         * Mark all queues for this STA as unallocated and defer TX
         * frames until the queue is allocated
         */
        mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
    }

    for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
        struct iwl_mvm_txq *mvmtxq =
            iwl_mvm_txq_from_mac80211(sta->txq[i]);

        mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
        INIT_LIST_HEAD(&mvmtxq->list);
        atomic_set(&mvmtxq->tx_request, 0);
    }

    mvm_sta->agg_tids = 0;

    if (iwl_mvm_has_new_rx_api(mvm) &&
        !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
        int q;

        dup_data = kcalloc(mvm->trans->num_rx_queues,
                           sizeof(*dup_data), GFP_KERNEL);
        if (!dup_data)
            return -ENOMEM;

        /*
         * Initialize all the last_seq values to 0xffff which can never
         * compare equal to the frame's seq_ctrl in the check in
         * iwl_mvm_is_dup() since the lower 4 bits are the frame
         * number and fragmented packets don't reach that function.
         *
         * This thus allows receiving a packet with seqno 0 and the
         * retry bit set as the very first packet on a new TID.
         */
        for (q = 0; q < mvm->trans->num_rx_queues; q++)
            memset(dup_data[q].last_seq, 0xff,
                   sizeof(dup_data[q].last_seq));
        mvm_sta->dup_data = dup_data;
    }

    if (!iwl_mvm_has_new_tx_api(mvm)) {
        ret = iwl_mvm_reserve_sta_stream(mvm, sta,
                                         ieee80211_vif_type_p2p(vif));
        if (ret)
            goto err;
    }

    /*
     * if rs is registered with mac80211, then "add station" will be
     * handled via the corresponding ops, otherwise it is done through
     * the internal rate scaling algorithm
     */
    if (iwl_mvm_has_tlc_offload(mvm))
        iwl_mvm_rs_add_sta(mvm, mvm_sta);
    else
        spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);

    iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
    ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
    if (ret)
        goto err;

    if (vif->type == NL80211_IFTYPE_STATION) {
        if (!sta->tdls) {
            WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
            mvmvif->ap_sta_id = sta_id;
        } else {
            WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
        }
    }

    rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

    return 0;

err:
    return ret;
}

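/* Set/clear the DRAIN_FLOW flag so the firmware drains frames for this STA */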
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain)
{
    struct iwl_mvm_add_sta_cmd cmd = {};
    int ret;
    u32 status;

    lockdep_assert_held(&mvm->mutex);

    cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
    cmd.sta_id = mvmsta->sta_id;
    cmd.add_modify = STA_MODE_MODIFY;
    cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
    cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

    status = ADD_STA_SUCCESS;
    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                      iwl_mvm_add_sta_cmd_size(mvm),
                                      &cmd, &status);
    if (ret)
        return ret;

    switch (status & IWL_ADD_STA_STATUS_MASK) {
    case ADD_STA_SUCCESS:
        IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
                       mvmsta->sta_id);
        break;
    default:
        ret = -EIO;
        IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
                mvmsta->sta_id);
        break;
    }

    return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
    struct ieee80211_sta *sta;
    struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
        .sta_id = sta_id,
    };
    int ret;

    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                    lockdep_is_held(&mvm->mutex));

    /* Note: internal stations are marked as error values */
    if (!sta) {
        IWL_ERR(mvm, "Invalid station id\n");
        return -EINVAL;
    }

    ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
                               sizeof(rm_sta_cmd), &rm_sta_cmd);
    if (ret) {
        IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
        return ret;
    }

    return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct ieee80211_sta *sta)
{
    struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
    int i;

    lockdep_assert_held(&mvm->mutex);

    for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
        if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
            continue;

        iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
                            0);
        mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
    }

    for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
        struct iwl_mvm_txq *mvmtxq =
            iwl_mvm_txq_from_mac80211(sta->txq[i]);

        mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
    }
}

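/* Wait until the transport reports all of this station's TX queues empty */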
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
                                  struct iwl_mvm_sta *mvm_sta)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
        u16 txq_id;
        int ret;

        spin_lock_bh(&mvm_sta->lock);
        txq_id = mvm_sta->tid_data[i].txq_id;
        spin_unlock_bh(&mvm_sta->lock);

        if (txq_id == IWL_MVM_INVALID_QUEUE)
            continue;

        ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
        if (ret)
            return ret;
    }

    return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                   struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
    u8 sta_id = mvm_sta->sta_id;
    int ret;

    lockdep_assert_held(&mvm->mutex);

    if (iwl_mvm_has_new_rx_api(mvm))
        kfree(mvm_sta->dup_data);

    ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
    if (ret)
        return ret;

    /* flush its queues here since we are freeing mvm_sta */
    ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
    if (ret)
        return ret;
    if (iwl_mvm_has_new_tx_api(mvm)) {
        ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
    } else {
        u32 q_mask = mvm_sta->tfd_queue_msk;

        ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
                                             q_mask);
    }
    if (ret)
        return ret;

    ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

    iwl_mvm_disable_sta_queues(mvm, vif, sta);

    /* If there is a TXQ still marked as reserved - free it */
    if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
        u8 reserved_txq = mvm_sta->reserved_queue;
        enum iwl_mvm_queue_status *status;

        /*
         * If no traffic has gone through the reserved TXQ - it
         * is still marked as IWL_MVM_QUEUE_RESERVED, and
         * should be manually marked as free again
         */
        status = &mvm->queue_info[reserved_txq].status;
        if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
                 (*status != IWL_MVM_QUEUE_FREE),
                 "sta_id %d reserved txq %d status %d",
                 sta_id, reserved_txq, *status))
            return -EINVAL;

        *status = IWL_MVM_QUEUE_FREE;
    }

    if (vif->type == NL80211_IFTYPE_STATION &&
        mvmvif->ap_sta_id == sta_id) {
        /* if associated - we can't remove the AP STA now */
        if (vif->bss_conf.assoc)
            return ret;

        /* unassoc - go ahead - remove the AP STA now */
        mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

        /* clear d0i3_ap_sta_id if no longer relevant */
        if (mvm->d0i3_ap_sta_id == sta_id)
            mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
    }

    /*
     * This shouldn't happen - the TDLS channel switch should be canceled
     * before the STA is removed.
     */
    if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
        mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
        cancel_delayed_work(&mvm->tdls_cs.dwork);
    }

    /*
     * Make sure that the tx response code sees the station as -EBUSY and
     * calls the drain worker.
     */
    spin_lock_bh(&mvm_sta->lock);
    spin_unlock_bh(&mvm_sta->lock);

    ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
    RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

    return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
                      struct ieee80211_vif *vif,
                      u8 sta_id)
{
    int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

    lockdep_assert_held(&mvm->mutex);

    RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
    return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
                             struct iwl_mvm_int_sta *sta,
                             u32 qmask, enum nl80211_iftype iftype,
                             enum iwl_sta_type type)
{
    if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
        sta->sta_id == IWL_MVM_INVALID_STA) {
        sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
        if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
            return -ENOSPC;
    }

    sta->tfd_queue_msk = qmask;
    sta->type = type;

    /* put a non-NULL value so iterating over the stations won't stop */
    rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
    return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
    RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
    memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
    sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
                                          u8 sta_id, u8 fifo)
{
    unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
        mvm->cfg->base_params->wd_timeout :
        IWL_WATCHDOG_DISABLED;

    if (iwl_mvm_has_new_tx_api(mvm)) {
        int tvqm_queue =
            iwl_mvm_tvqm_enable_txq(mvm, sta_id,
                                    IWL_MAX_TID_COUNT,
                                    wdg_timeout);
        *queue = tvqm_queue;
    } else {
        struct iwl_trans_txq_scd_cfg cfg = {
            .fifo = fifo,
            .sta_id = sta_id,
            .tid = IWL_MAX_TID_COUNT,
            .aggregate = false,
            .frame_limit = IWL_FRAME_LIMIT,
        };

        iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
    }
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
    int ret;

    lockdep_assert_held(&mvm->mutex);

    /* Allocate aux station and assign to it the aux queue */
    ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
                                   NL80211_IFTYPE_UNSPECIFIED,
                                   IWL_STA_AUX_ACTIVITY);
    if (ret)
        return ret;

    /* Map Aux queue to fifo - needs to happen before adding Aux station */
    if (!iwl_mvm_has_new_tx_api(mvm))
        iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
                                      mvm->aux_sta.sta_id,
                                      IWL_MVM_TX_FIFO_MCAST);

    ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
                                     MAC_INDEX_AUX, 0);
    if (ret) {
        iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
        return ret;
    }

    /*
     * For 22000 firmware and on we cannot add queue to a station unknown
     * to firmware so enable queue here - after the station was added
     */
    if (iwl_mvm_has_new_tx_api(mvm))
        iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
                                      mvm->aux_sta.sta_id,
                                      IWL_MVM_TX_FIFO_MCAST);

    return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    int ret;

    lockdep_assert_held(&mvm->mutex);

    /* Map snif queue to fifo - must happen before adding snif station */
    if (!iwl_mvm_has_new_tx_api(mvm))
        iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
                                      mvm->snif_sta.sta_id,
                                      IWL_MVM_TX_FIFO_BE);

    ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
                                     mvmvif->id, 0);
    if (ret)
        return ret;

    /*
     * For 22000 firmware and on we cannot add queue to a station unknown
     * to firmware so enable queue here - after the station was added
     */
    if (iwl_mvm_has_new_tx_api(mvm))
        iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
                                      mvm->snif_sta.sta_id,
                                      IWL_MVM_TX_FIFO_BE);

    return 0;
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    int ret;

    lockdep_assert_held(&mvm->mutex);

    iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
    ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
    if (ret)
        IWL_WARN(mvm, "Failed sending remove station\n");

    return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
    iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
    lockdep_assert_held(&mvm->mutex);

    iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
    static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
    const u8 *baddr = _baddr;
    int queue;
    int ret;
    unsigned int wdg_timeout =
        iwl_mvm_get_wd_timeout(mvm, vif, false, false);
    struct iwl_trans_txq_scd_cfg cfg = {
        .fifo = IWL_MVM_TX_FIFO_VO,
        .sta_id = mvmvif->bcast_sta.sta_id,
        .tid = IWL_MAX_TID_COUNT,
        .aggregate = false,
        .frame_limit = IWL_FRAME_LIMIT,
    };

    lockdep_assert_held(&mvm->mutex);

    if (!iwl_mvm_has_new_tx_api(mvm)) {
        if (vif->type == NL80211_IFTYPE_AP ||
            vif->type == NL80211_IFTYPE_ADHOC) {
            queue = mvm->probe_queue;
        } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
            queue = mvm->p2p_dev_queue;
        } else {
            WARN(1, "Missing required TXQ for adding bcast STA\n");
            return -EINVAL;
        }

        bsta->tfd_queue_msk |= BIT(queue);

        iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
    }

    if (vif->type == NL80211_IFTYPE_ADHOC)
        baddr = vif->bss_conf.bssid;

    if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
        return -ENOSPC;

    ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
                                     mvmvif->id, mvmvif->color);
    if (ret)
        return ret;

    /*
     * For 22000 firmware and on we cannot add queue to a station unknown
     * to firmware so enable queue here - after the station was added
     */
    if (iwl_mvm_has_new_tx_api(mvm)) {
        queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
                                        IWL_MAX_TID_COUNT,
                                        wdg_timeout);

        if (vif->type == NL80211_IFTYPE_AP ||
            vif->type == NL80211_IFTYPE_ADHOC)
            mvm->probe_queue = queue;
        else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
            mvm->p2p_dev_queue = queue;
    }

    return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    int queue;

    lockdep_assert_held(&mvm->mutex);

    iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

    switch (vif->type) {
    case NL80211_IFTYPE_AP:
    case NL80211_IFTYPE_ADHOC:
        queue = mvm->probe_queue;
        break;
    case NL80211_IFTYPE_P2P_DEVICE:
        queue = mvm->p2p_dev_queue;
        break;
    default:
        WARN(1, "Can't free bcast queue on vif type %d\n",
             vif->type);
        return;
    }

    iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
    if (iwl_mvm_has_new_tx_api(mvm))
        return;

    WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
    mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    int ret;

    lockdep_assert_held(&mvm->mutex);

    iwl_mvm_free_bcast_sta_queues(mvm, vif);

    ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
    if (ret)
        IWL_WARN(mvm, "Failed sending remove station\n");
    return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

    lockdep_assert_held(&mvm->mutex);

    return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
                                    ieee80211_vif_type_p2p(vif),
                                    IWL_STA_GENERAL_PURPOSE);
}

/*
 * Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
    int ret;

    lockdep_assert_held(&mvm->mutex);

    ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
    if (ret)
        return ret;

    ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

    if (ret)
        iwl_mvm_dealloc_int_sta(mvm, bsta);

    return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

    iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    int ret;

    lockdep_assert_held(&mvm->mutex);

    ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

    iwl_mvm_dealloc_bcast_sta(mvm, vif);

    return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
    static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
    const u8 *maddr = _maddr;
    struct iwl_trans_txq_scd_cfg cfg = {
        .fifo = vif->type == NL80211_IFTYPE_AP ?
            IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
        .sta_id = msta->sta_id,
        .tid = 0,
        .aggregate = false,
        .frame_limit = IWL_FRAME_LIMIT,
    };
    unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
    int ret;

    lockdep_assert_held(&mvm->mutex);

    if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
                vif->type != NL80211_IFTYPE_ADHOC))
        return -ENOTSUPP;

    /*
     * In IBSS, ieee80211_check_queues() sets the cab_queue to be
     * invalid, so make sure we use the queue we want.
     * Note that this is done here as we want to avoid making DQA
     * changes in mac80211 layer.
     */
    if (vif->type == NL80211_IFTYPE_ADHOC)
        mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

    /*
     * While in previous FWs we had to exclude cab queue from TFD queue
     * mask, now it is needed as any other queue.
     */
    if (!iwl_mvm_has_new_tx_api(mvm) &&
        fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
        iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
                           timeout);
        msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
    }
    ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
                                     mvmvif->id, mvmvif->color);
    if (ret) {
        iwl_mvm_dealloc_int_sta(mvm, msta);
        return ret;
    }

    /*
     * Enable cab queue after the ADD_STA command is sent.
     * This is needed for 22000 firmware which won't accept an SCD_QUEUE_CFG
     * command with an unknown station id, and for FW that doesn't support
     * the station type API.
     */
    if (iwl_mvm_has_new_tx_api(mvm)) {
        int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
                                            0,
                                            timeout);
        mvmvif->cab_queue = queue;
    } else if (!fw_has_api(&mvm->fw->ucode_capa,
                           IWL_UCODE_TLV_API_STA_TYPE))
        iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
                           timeout);

    return 0;
}

2347static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2348 struct ieee80211_key_conf *keyconf,
2349 bool mcast)
2350{
2351 union {
2352 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2353 struct iwl_mvm_add_sta_key_cmd cmd;
2354 } u = {};
2355 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2356 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2357 __le16 key_flags;
2358 int ret, size;
2359 u32 status;
2360
2361
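	/* This is a valid situation for GTK removal */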
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

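	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */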
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

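/*
 * Send the FW a request to remove the multicast station from its internal
 * data structures, and in addition remove it from the local data structure.
 */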
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_rss_sync_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

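		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames
		 * in the reorder buffer.
		 */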
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);

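		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */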
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;

		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line
		 * size can be divided by the entry size (ALIGN will round
		 * up) or if the entry size can be divided by the cache
		 * line size, in which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire
		 * cache line for each queue, to avoid sharing cache lines
		 * between different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out
		 * early before starting the BA session in the firmware.
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this won't be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);

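		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data
		 * while RX is being processed in parallel
		 */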
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

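	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *     it as reserved
	 */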
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid %d: txq_id %d out of range (0, %d)!\n",
			tid, txq_id, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When the FW supports TLC_OFFLOAD, it also takes care of the
	 * aggregation parameters itself, so we should never get here
	 * in that case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is a queue
		 * already allocated.
		 * However, if the aggregation size is different from the
		 * default size, the scheduler would need to be reconfigured.
		 * We cannot do that with the new TX API, so return unsupported
		 * for now, until it will be offloaded to firmware.
		 * Note that if the SCD default value changes, this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different aggregation
	 * reorder buffer sizes for different sessions, our ucode doesn't
	 * allow for that and has a global limit for each station. Therefore,
	 * use the minimum of all the aggregation sessions and our default
	 * value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through the SCD, change its status back to free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

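	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */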
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

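	/*
	 * Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */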
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* get the station from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

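	/*
	 * If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */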
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

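	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */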
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's a good
	 * enough assumption
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

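	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */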
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(sleep_tx_count == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}