#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

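/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */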
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 requires the first STA id to be reserved */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
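/* send station add/update command to firmware */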
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->associated)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
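/* Disable aggregations for a bitmap of TIDs for a given station */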
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
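/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */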
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE, so only AC lower than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}
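/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection required - it does
 * nothing, unless the %force param is true.
 */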
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if ac X is higher than ac Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

alloc_queues:
	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need
		 * to first free this queue before enabling it again, we'll
		 * mark it as reserved to make sure no new traffic arrives on
		 * it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
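/*
 * After a HW restart, re-allocate and re-configure the queues a station had
 * before the restart, so the driver and firmware queue state match again.
 */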
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;

		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
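/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */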
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		spin_lock_bh(&mvm->queue_info_lock);
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			spin_unlock_bh(&mvm->queue_info_lock);
			return -EINVAL;
		}

		*status = IWL_MVM_QUEUE_FREE;
		spin_unlock_bh(&mvm->queue_info_lock);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
	}
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For a000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For a000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
			    IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
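/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */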
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
				   &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For a000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
						bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}
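/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */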
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}
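/*
 * Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 */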
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}
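/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */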
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
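/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 */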
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(vif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station type API.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    IWL_MAX_TID_COUNT,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE)) {
		/*
		 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
		 * invalid, so make sure we use the queue we want.
		 * Note that this is done here as we want to avoid making DQA
		 * changes in mac80211 layer.
		 */
		if (vif->type == NL80211_IFTYPE_ADHOC) {
			vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
			mvmvif->cab_queue = vif->cab_queue;
		}
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
	}

	return 0;
}
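/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */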
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
			    IWL_MAX_TID_COUNT, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames
		 * in the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		setup_timer(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired,
			    (unsigned long)&mvm->baid_map[baid]);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
2332
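/*
 * Enable or disable TX aggregation on a TID by updating the station's
 * tid_disable_agg mask (and, on the old TX API path, its TFD queue
 * mask) through an ADD_STA command.
 */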
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

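/*
 * Handle mac80211's AMPDU TX start request: make sure a TX queue is
 * available for the TID (allocating and reserving one on the old TX
 * path if needed), record the starting SSN, and either report that
 * aggregation can start right away or wait for the queue to drain.
 */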
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. The TID already has a queue - it only needs to become
	 *     aggregated.
	 *  2. No queue was allocated yet - find a free data queue and
	 *     mark it as reserved.
	 * The new TX path must already have a queue assigned and never
	 * allocates one here, while shared queues can't be aggregated.
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (txq_id == IWL_MVM_INVALID_QUEUE) {
			ret = -ENXIO;
			goto release_locks;
		}
	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	} else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 IWL_MVM_DQA_MIN_DATA_QUEUE,
						 IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/*
		 * iwl_mvm_find_free_queue() should never return an
		 * inactive queue, so this is certainly a bug.
		 */
		WARN_ON(mvm->queue_info[txq_id].status ==
			IWL_MVM_QUEUE_INACTIVE);

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In gen2 devices the next_reclaimed index is only 8 bit wide,
	 * so normalize the ssn before comparing it to next_reclaimed.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

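/*
 * Finalize an agreed TX aggregation session: clamp the BA window to
 * what the firmware supports, enable or reconfigure the TX queue as
 * needed, and update the rate-scaling table with the new aggregation
 * frame limit.
 */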
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	if (!mvm->trans->cfg->gen2)
		buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
	else
		buf_size = min_t(int, buf_size,
				 LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there were no queue for this tid,
		 * iwl_mvm_sta_tx_agg_start() would have failed, so a queue
		 * must exist here. Reconfiguring an existing queue to a
		 * smaller window size isn't supported on this path.
		 */
		if (buf_size < mvmsta->max_agg_bufsize)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Maybe there is no need to even alloc a queue... */
	if (queue_status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}

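/*
 * Return a queue that was only reserved (never enabled) for an
 * aggregation session back to the free pool. Not needed on the new
 * TX path, where this bookkeeping doesn't apply.
 */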
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					u16 txq_id)
{
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through
	 * yet. In that case the queue was never allocated through
	 * iwl_mvm_enable_txq(), so it can simply be marked free again.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;

	spin_unlock_bh(&mvm->queue_info_lock);
}

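/*
 * Handle mac80211's request to tear down a TX aggregation session.
 * Depending on the session state this either disables aggregation in
 * the firmware right away or only rolls back the local setup.
 */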
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer expires for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

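/*
 * Forcibly stop a TX aggregation session and flush any frames still
 * queued for it, draining the station while the flush is in progress.
 */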
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

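/*
 * Find a free offset in the firmware key table for a new key,
 * preferring the slot whose previous key was deleted longest ago.
 */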
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL, for
		 * example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

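/*
 * Build and send an ADD_STA_KEY command to install a unicast or
 * multicast key for a station. Depending on the firmware's TKIP
 * capabilities (IWL_UCODE_TLV_API_TKIP_MIC_KEYS), either the full MIC
 * keys and PN or the pre-computed phase-1 key are passed down.
 */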
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

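/*
 * Install or remove the IGTK used to protect multicast management
 * frames, using the MGMT_MCAST_KEY command (v1 layout on firmware
 * without the new RX API).
 */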
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       sizeof(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

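/*
 * Program one key for one station into the firmware, deriving the
 * TKIP phase-1 key from mac80211's RX sequence data when needed.
 */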
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (vif->type == NL80211_IFTYPE_AP) {
			ret = -EINVAL;
			break;
		}
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

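/*
 * Top-level key installation entry point: resolve the station the key
 * belongs to, pick a firmware key offset if none was pre-assigned, and
 * install the key (twice for WEP, which shares one slot between the
 * unicast and multicast entries).
 */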
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
		    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
		    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
			ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
						    false);
			goto end;
		}

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot at the firmware.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

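/*
 * Remove a key from the firmware tables and update the key-offset
 * bookkeeping that iwl_mvm_set_fw_key_idx() relies on.
 */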
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

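/*
 * Tell the firmware how many frames it may release for a station that
 * is in power save, e.g. during a U-APSD service period, adjusting the
 * count to what is actually queued and setting the more-data flag when
 * not everything fits.
 */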
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(sleep_tx_count == 0)) {
			/* nothing to release - signal EOSP right away */
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

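/*
 * Return the number of frames still pending in the TX path for this
 * TID, i.e. the distance between the last assigned sequence number and
 * the next one to be reclaimed.
 */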
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In gen2 devices the next_reclaimed index is only 8 bit wide, so
	 * truncate the sn the same way to compare the relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}