#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

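/*
 * Newer firmware uses a bigger ADD_STA command; pick the size the running
 * firmware actually expects.
 */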
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

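/* Find a free station ID that may be used for the given interface type */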
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* the AP's sta_id (on a station vif) is assumed to be 0 - reserve it */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

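/* send station add/update command to firmware */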
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = le16_get_bits(sta->he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(sta->he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	/*
	 * A station declares its maximum A-MPDU length exponent in its HT,
	 * VHT or HE 6 GHz band capabilities; HE capable stations may extend
	 * that value via the HE MAC capabilities element.
	 */
	if (sta->he_cap.has_he)
		agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	/* Limit to max A-MPDU supported by FW */
	if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
		agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
			    STA_FLG_MAX_AGG_SIZE_SHIFT);

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

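/*
 * Timer callback for RX BA sessions: rearm while traffic is still flowing,
 * otherwise tell mac80211 that the session timed out.
 */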
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

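/* Disable aggregations for a bitmap of TIDs for a given station */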
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

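/* Disable the given TXQ, freeing it in the scheduler if no TID uses it anymore */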
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

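/* Return a bitmap of the queue's TIDs that have an aggregation session open */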
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

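/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */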
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

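/* Free a queue that went inactive so that it can be reused for another STA */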
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

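/* Pick an existing DATA queue for a new TID to share */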
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE, so the only AC lower than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

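/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection required - it does
 * nothing, unless the %force param is true.
 */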
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected
	 * to the lowest one of the streams in the queue. Check if this is
	 * needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is
	 * with value 3 and VO with value 0, so to check if ac X is lower
	 * than ac Y we need to check if the numerical value of X is LARGER
	 * than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}

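/* Find a free queue in the given range, i.e. one with no TIDs mapped to it */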
static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

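/*
 * Allocate a queue from the TVQM (new TX path), retrying with progressively
 * smaller queue sizes if the allocation fails.
 */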
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
				mvm->trans->cfg->min_256_ba_txq_size);

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	}

	do {
		__le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);

		queue = iwl_trans_txq_alloc(mvm->trans, enable,
					    sta_id, tid, SCD_QUEUE_CFG,
					    size, timeout);

		if (queue < 0)
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
					    size, sta_id, tid, queue);
		size /= 2;
	} while (queue < 0 && size >= 16);

	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	return queue;
}

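/* Allocate a TVQM queue for the given station/TID and record the mapping */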
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

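/*
 * Update the driver's TXQ bookkeeping for a TID being mapped to a queue.
 * Returns true if the queue still needs to be enabled in hardware.
 */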
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

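/*
 * Enable a TXQ on the old (pre-TVQM) TX path.
 * Returns true if the SSN had to be incremented to match the HW write pointer.
 */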
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

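/* Hand ownership of a shared queue to one of its remaining TIDs */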
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

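/* Unshare a queue left with a single TID, redirecting it to that TID's AC */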
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as non-shared again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

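/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could potentially
 * be reused.
 */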
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the
		 * TXQ, ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue
		 * can't be allocated for it as long as it is an owner of
		 * another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

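/*
 * Check stations' queues for inactivity: unshare queues that no longer need
 * to be shared, reassign queue ownership where needed, and pick (at most)
 * one inactive queue that can be freed and reused for @alloc_for_sta.
 *
 * Returns the queue number, or a negative error code.
 */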
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and
		 * is in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It
		 * could be that it was removed since getting the queues, and
		 * in this case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}

		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

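/* Allocate and configure a TX queue for the given station/TID */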
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the
	 * queue as aggregated.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);

	return ret;
}

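/* Work handler: allocate HW queues for mac80211 TXQs still waiting for one */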
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here, but if this fails we can't
		 * transmit anyway - so just don't transmit the frame etc.
		 * and let them back up ... we've tried our best to allocate
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			list_del_init(&mvmtxq->list);
			continue;
		}

		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}

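/* Reserve a DATA queue for a newly added station (old TX path only) */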
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

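/*
 * In DQA mode, after a HW restart the queues should be allocated as before,
 * in order to avoid race conditions when there are shared queues. This
 * function does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */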
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * The firmware restart causes the HW sequence number
			 * to start counting from 0 again, so reset the
			 * driver's copy here to keep it in sync with what is
			 * actually sent on the air.
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

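/* Send ADD_STA for one of the driver's internal (non-mac80211) stations */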
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
				  0) >= 12 &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

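/* Add a station coming from mac80211, or re-add it after a HW restart */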
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->trans_cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff, which can
		 * never compare equal to a frame's seq_ctrl in the duplicate
		 * check: the lower 4 bits there are the fragment number and
		 * fragmented packets don't reach that check. This allows
		 * receiving a packet with seqno 0 and the retry bit set as
		 * the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be
	 * handled via the corresponding ops, otherwise need to notify rate
	 * scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

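/* Tell the firmware to start/stop draining frames queued for this station */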
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

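/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */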
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

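/* Disable all of the station's TX queues and invalidate their TXQ mappings */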
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
				    0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

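/*
 * Remove a mac80211 station: drain and flush its frames, free its queues,
 * then remove it from the firmware and the driver's tables.
 */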
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}

static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
				       wdg_timeout);
}

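/* Add an internal (aux/sniffer) station together with its TX queue */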
static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
					  int maccolor, u8 *addr,
					  struct iwl_mvm_int_sta *sta,
					  u16 *queue, int fifo)
{
	int ret;

	/* Map queue to fifo - needs to happen before adding station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);

	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
	if (ret) {
		if (!iwl_mvm_has_new_tx_api(mvm))
			iwl_mvm_disable_txq(mvm, NULL, *queue,
					    IWL_MAX_TID_COUNT, 0);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int txq;

		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
		if (txq < 0) {
			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
			return txq;
		}

		*queue = txq;
	}

	return 0;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/*
	 * In CDB NICs we need to specify which lmac to use for aux activity;
	 * use the mac_id argument place to send lmac_id to the function.
	 */
	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
					     &mvm->aux_sta, &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					      NULL, &mvm->snif_sta,
					      &mvm->snif_queue,
					      IWL_MVM_TX_FIFO_BE);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

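/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */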
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			queue = mvm->probe_queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			queue = mvm->p2p_dev_queue;
		} else {
			WARN(1, "Missing required TXQ for adding bcast STA\n");
			return -EINVAL;
		}

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		if (queue < 0) {
			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
			return queue;
		}

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

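/*
 * Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures.
 */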
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

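/*
 * Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */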
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

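/*
 * Send the FW a request to remove the P2P broadcast station from its internal
 * data structures, and in addition remove it from the local data structure.
 */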
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

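/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */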
2291int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2292{
2293 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2294 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2295 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2296 const u8 *maddr = _maddr;
2297 struct iwl_trans_txq_scd_cfg cfg = {
2298 .fifo = vif->type == NL80211_IFTYPE_AP ?
2299 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2300 .sta_id = msta->sta_id,
2301 .tid = 0,
2302 .aggregate = false,
2303 .frame_limit = IWL_FRAME_LIMIT,
2304 };
2305 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2306 int ret;
2307
2308 lockdep_assert_held(&mvm->mutex);
2309
2310 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2311 vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
2320 if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
2327 if (!iwl_mvm_has_new_tx_api(mvm) &&
2328 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2329 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2330 timeout);
2331 msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2332 }
2333 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2334 mvmvif->id, mvmvif->color);
2335 if (ret)
		goto err;

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for newer firmware which won't accept a queue
	 * configuration command with an unknown station id.
	 */
2345 if (iwl_mvm_has_new_tx_api(mvm)) {
2346 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2347 0,
2348 timeout);
2349 if (queue < 0) {
2350 ret = queue;
2351 goto err;
2352 }
2353 mvmvif->cab_queue = queue;
2354 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2355 IWL_UCODE_TLV_API_STA_TYPE))
2356 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2357 timeout);
2358
2359 return 0;
2360err:
2361 iwl_mvm_dealloc_int_sta(mvm, msta);
2362 return ret;
2363}
2364
2365static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2366 struct ieee80211_key_conf *keyconf,
2367 bool mcast)
2368{
2369 union {
2370 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2371 struct iwl_mvm_add_sta_key_cmd cmd;
2372 } u = {};
2373 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2374 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2375 __le16 key_flags;
2376 int ret, size;
	u32 status;

	/* Removing a key for a station that is already gone is a valid no-op */
	if (sta_id == IWL_MVM_INVALID_STA)
2381 return 0;
2382
2383 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2384 STA_KEY_FLG_KEYID_MSK);
2385 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2386 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2387
2388 if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
2395 u.cmd.common.key_flags = key_flags;
2396 u.cmd.common.key_offset = keyconf->hw_key_idx;
2397 u.cmd.common.sta_id = sta_id;
2398
2399 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2400
2401 status = ADD_STA_SUCCESS;
2402 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2403 &status);
2404
2405 switch (status) {
2406 case ADD_STA_SUCCESS:
2407 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2408 break;
2409 default:
2410 ret = -EIO;
2411 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2412 break;
2413 }
2414
2415 return ret;
}

/*
 * Remove the AP/GO multicast station: flush its frames, disable the cab
 * queue and remove the station from the firmware.
 */
2422int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2423{
2424 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2425 int ret;
2426
2427 lockdep_assert_held(&mvm->mutex);
2428
2429 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
2430
2431 iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
2432
2433 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2434 if (ret)
2435 IWL_WARN(mvm, "Failed sending remove station\n");
2436
2437 return ret;
2438}
2439
2440#define IWL_MAX_RX_BA_SESSIONS 16
2441
2442static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2443{
2444 struct iwl_mvm_delba_data notif = {
2445 .baid = baid,
2446 };
2447
	iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
					&notif, sizeof(notif));
}
2451
2452static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2453 struct iwl_mvm_baid_data *data)
2454{
2455 int i;
2456
2457 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2458
2459 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2460 int j;
2461 struct iwl_mvm_reorder_buffer *reorder_buf =
2462 &data->reorder_buf[i];
2463 struct iwl_mvm_reorder_buf_entry *entries =
2464 &data->entries[i * data->entries_per_queue];
2465
2466 spin_lock_bh(&reorder_buf->lock);
2467 if (likely(!reorder_buf->num_stored)) {
2468 spin_unlock_bh(&reorder_buf->lock);
2469 continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames
		 * in the reorder buffer.
		 */
		WARN_ON(1);
2478
2479 for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);

		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
2489 reorder_buf->removed = true;
2490 spin_unlock_bh(&reorder_buf->lock);
2491 del_timer_sync(&reorder_buf->reorder_timer);
2492 }
2493}
2494
2495static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2496 struct iwl_mvm_baid_data *data,
2497 u16 ssn, u16 buf_size)
2498{
2499 int i;
2500
2501 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2502 struct iwl_mvm_reorder_buffer *reorder_buf =
2503 &data->reorder_buf[i];
2504 struct iwl_mvm_reorder_buf_entry *entries =
2505 &data->entries[i * data->entries_per_queue];
2506 int j;
2507
2508 reorder_buf->num_stored = 0;
2509 reorder_buf->head_sn = ssn;
2510 reorder_buf->buf_size = buf_size;
2511
2512 timer_setup(&reorder_buf->reorder_timer,
2513 iwl_mvm_reorder_timer_expired, 0);
2514 spin_lock_init(&reorder_buf->lock);
2515 reorder_buf->mvm = mvm;
2516 reorder_buf->queue = i;
2517 reorder_buf->valid = false;
2518 for (j = 0; j < reorder_buf->buf_size; j++)
2519 __skb_queue_head_init(&entries[j].e.frames);
2520 }
2521}
2522
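/*
 * Start or stop an RX block-ack session for the given station and TID.
 * When the new RX API is in use, starting a session also allocates the
 * per-queue reorder buffers and arms the session timeout timer.
 */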
2523int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2524 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2525{
2526 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2527 struct iwl_mvm_add_sta_cmd cmd = {};
2528 struct iwl_mvm_baid_data *baid_data = NULL;
2529 int ret;
2530 u32 status;
2531
2532 lockdep_assert_held(&mvm->mutex);
2533
2534 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2535 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2536 return -ENOSPC;
2537 }
2538
2539 if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't handle the __align() of the struct well */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
2550 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2551 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
2565 baid_data = kzalloc(sizeof(*baid_data) +
2566 mvm->trans->num_rx_queues *
2567 reorder_buf_size,
2568 GFP_KERNEL);
2569 if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
2576 baid_data->entries_per_queue =
2577 reorder_buf_size / sizeof(baid_data->entries[0]);
2578 }
2579
2580 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2581 cmd.sta_id = mvm_sta->sta_id;
2582 cmd.add_modify = STA_MODE_MODIFY;
2583 if (start) {
2584 cmd.add_immediate_ba_tid = (u8) tid;
2585 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2586 cmd.rx_ba_window = cpu_to_le16(buf_size);
2587 } else {
2588 cmd.remove_immediate_ba_tid = (u8) tid;
2589 }
2590 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2591 STA_MODIFY_REMOVE_BA_TID;
2592
2593 status = ADD_STA_SUCCESS;
2594 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2595 iwl_mvm_add_sta_cmd_size(mvm),
2596 &cmd, &status);
2597 if (ret)
2598 goto out_free;
2599
2600 switch (status & IWL_ADD_STA_STATUS_MASK) {
2601 case ADD_STA_SUCCESS:
2602 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2603 start ? "start" : "stopp");
2604 break;
2605 case ADD_STA_IMMEDIATE_BA_FAILURE:
2606 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2607 ret = -ENOSPC;
2608 break;
2609 default:
2610 ret = -EIO;
2611 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2612 start ? "start" : "stopp", status);
2613 break;
2614 }
2615
2616 if (ret)
2617 goto out_free;
2618
2619 if (start) {
2620 u8 baid;
2621
2622 mvm->rx_ba_sessions++;
2623
2624 if (!iwl_mvm_has_new_rx_api(mvm))
2625 return 0;
2626
2627 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2628 ret = -EINVAL;
2629 goto out_free;
2630 }
2631 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2632 IWL_ADD_STA_BAID_SHIFT);
2633 baid_data->baid = baid;
2634 baid_data->timeout = timeout;
2635 baid_data->last_rx = jiffies;
2636 baid_data->rcu_ptr = &mvm->baid_map[baid];
2637 timer_setup(&baid_data->session_timer,
2638 iwl_mvm_rx_agg_session_expired, 0);
2639 baid_data->mvm = mvm;
2640 baid_data->tid = tid;
2641 baid_data->sta_id = mvm_sta->sta_id;
2642
2643 mvm_sta->tid_to_baid[tid] = baid;
2644 if (timeout)
2645 mod_timer(&baid_data->session_timer,
2646 TU_TO_EXP_TIME(timeout * 2));
2647
		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);

		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
2655 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2656 mvm_sta->sta_id, tid, baid);
2657 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2658 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2659 } else {
2660 u8 baid = mvm_sta->tid_to_baid[tid];
2661
		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
2665 if (!iwl_mvm_has_new_rx_api(mvm))
2666 return 0;
2667
2668 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2669 return -EINVAL;
2670
2671 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2672 if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
2677 del_timer_sync(&baid_data->session_timer);
2678 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2679 kfree_rcu(baid_data, rcu_head);
2680 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2681 }
2682 return 0;
2683
2684out_free:
2685 kfree(baid_data);
2686 return ret;
2687}
2688
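/*
 * Enable or disable TX aggregation on a TID for the given station by
 * updating its tid_disable_tx mask through an ADD_STA command.
 */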
2689int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2690 int tid, u8 queue, bool start)
2691{
2692 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2693 struct iwl_mvm_add_sta_cmd cmd = {};
2694 int ret;
2695 u32 status;
2696
2697 lockdep_assert_held(&mvm->mutex);
2698
2699 if (start) {
2700 mvm_sta->tfd_queue_msk |= BIT(queue);
2701 mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
2705 }
2706
2707 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2708 cmd.sta_id = mvm_sta->sta_id;
2709 cmd.add_modify = STA_MODE_MODIFY;
2710 if (!iwl_mvm_has_new_tx_api(mvm))
2711 cmd.modify_mask = STA_MODIFY_QUEUES;
2712 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2713 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2714 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2715
2716 status = ADD_STA_SUCCESS;
2717 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2718 iwl_mvm_add_sta_cmd_size(mvm),
2719 &cmd, &status);
2720 if (ret)
2721 return ret;
2722
2723 switch (status & IWL_ADD_STA_STATUS_MASK) {
2724 case ADD_STA_SUCCESS:
2725 break;
2726 default:
2727 ret = -EIO;
2728 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2729 start ? "start" : "stopp", status);
2730 break;
2731 }
2732
2733 return ret;
2734}
2735
2736const u8 tid_to_mac80211_ac[] = {
2737 IEEE80211_AC_BE,
2738 IEEE80211_AC_BK,
2739 IEEE80211_AC_BK,
2740 IEEE80211_AC_BE,
2741 IEEE80211_AC_VI,
2742 IEEE80211_AC_VI,
2743 IEEE80211_AC_VO,
2744 IEEE80211_AC_VO,
2745 IEEE80211_AC_VO,
2746};
2747
2748static const u8 tid_to_ucode_ac[] = {
2749 AC_BE,
2750 AC_BK,
2751 AC_BK,
2752 AC_BE,
2753 AC_VI,
2754 AC_VI,
2755 AC_VO,
2756 AC_VO,
2757};
2758
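/*
 * Start a TX aggregation session: pick (or reuse) a TX queue for the TID
 * and decide whether the ADDBA can be sent immediately or must wait until
 * the queue has emptied.
 */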
2759int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2760 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2761{
2762 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2763 struct iwl_mvm_tid_data *tid_data;
2764 u16 normalized_ssn;
2765 u16 txq_id;
2766 int ret;
2767
2768 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2769 return -EINVAL;
2770
2771 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2772 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2773 IWL_ERR(mvm,
2774 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2775 mvmsta->tid_data[tid].state);
2776 return -ENXIO;
2777 }
2778
2779 lockdep_assert_held(&mvm->mutex);
2780
2781 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2782 iwl_mvm_has_new_tx_api(mvm)) {
2783 u8 ac = tid_to_mac80211_ac[tid];
2784
2785 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2786 if (ret)
2787 return ret;
2788 }
2789
	spin_lock_bh(&mvmsta->lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *     it as reserved
	 */
2798 txq_id = mvmsta->tid_data[tid].txq_id;
2799 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2800 ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2801 IWL_MVM_DQA_MIN_DATA_QUEUE,
2802 IWL_MVM_DQA_MAX_DATA_QUEUE);
2803 if (ret < 0) {
2804 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2805 goto out;
2806 }
2807
		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2812 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2813 ret = -ENXIO;
2814 IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
2815 tid, IWL_MAX_HW_QUEUES - 1);
2816 goto out;
2817
2818 } else if (unlikely(mvm->queue_info[txq_id].status ==
2819 IWL_MVM_QUEUE_SHARED)) {
2820 ret = -ENXIO;
2821 IWL_DEBUG_TX_QUEUES(mvm,
2822 "Can't start tid %d agg on shared queue!\n",
2823 tid);
2824 goto out;
2825 }
2826
2827 IWL_DEBUG_TX_QUEUES(mvm,
2828 "AGG for tid %d will be on queue #%d\n",
2829 tid, txq_id);
2830
2831 tid_data = &mvmsta->tid_data[tid];
2832 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2833 tid_data->txq_id = txq_id;
2834 *ssn = tid_data->ssn;
2835
2836 IWL_DEBUG_TX_QUEUES(mvm,
2837 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2838 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * On gen2 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
2845 normalized_ssn = tid_data->ssn;
2846 if (mvm->trans->trans_cfg->gen2)
2847 normalized_ssn &= 0xff;
2848
2849 if (normalized_ssn == tid_data->next_reclaimed) {
2850 tid_data->state = IWL_AGG_STARTING;
2851 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
2852 } else {
2853 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2854 ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
2855 }
2856
2857out:
2858 spin_unlock_bh(&mvmsta->lock);
2859
2860 return ret;
2861}
2862
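/*
 * Finalize a TX aggregation session once the ADDBA handshake succeeded:
 * configure the queue for aggregation and update the rate scaling data
 * with the negotiated buffer size.
 */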
2863int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2864 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
2865 bool amsdu)
2866{
2867 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2868 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2869 unsigned int wdg_timeout =
2870 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2871 int queue, ret;
2872 bool alloc_queue = true;
2873 enum iwl_mvm_queue_status queue_status;
2874 u16 ssn;
2875
2876 struct iwl_trans_txq_scd_cfg cfg = {
2877 .sta_id = mvmsta->sta_id,
2878 .tid = tid,
2879 .frame_limit = buf_size,
2880 .aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * itself, so this function should never be called in that case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
2888 return -EINVAL;
2889
2890 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2891 != IWL_MAX_TID_COUNT);
2892
2893 spin_lock_bh(&mvmsta->lock);
2894 ssn = tid_data->ssn;
2895 queue = tid_data->txq_id;
2896 tid_data->state = IWL_AGG_ON;
2897 mvmsta->agg_tids |= BIT(tid);
2898 tid_data->ssn = 0xffff;
2899 tid_data->amsdu_in_ampdu_allowed = amsdu;
2900 spin_unlock_bh(&mvmsta->lock);
2901
	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is such a queue.
		 *
		 * On new TX API rate scaling and the BA manager are offloaded,
		 * so reconfiguring to a smaller window size isn't supported.
		 */
2914 if (buf_size < IWL_FRAME_LIMIT)
2915 return -ENOTSUPP;
2916
2917 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2918 if (ret)
2919 return -EIO;
2920 goto out;
2921 }
2922
2923 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2924
	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
2940 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2941 BIT(queue));
2942 if (ret) {
2943 IWL_ERR(mvm,
2944 "Error draining queue before reconfig\n");
2945 return ret;
2946 }
2947
2948 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2949 mvmsta->sta_id, tid,
2950 buf_size, ssn);
2951 if (ret) {
2952 IWL_ERR(mvm,
2953 "Error reconfiguring TXQ #%d\n", queue);
2954 return ret;
2955 }
2956 }
2957
2958 if (alloc_queue)
2959 iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
2964 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2965 if (ret)
2966 return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2971
out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
2980 mvmsta->max_agg_bufsize =
2981 min(mvmsta->max_agg_bufsize, buf_size);
2982 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2983
2984 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2985 sta->addr, tid);
2986
2987 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
2988}
2989
2990static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
2991 struct iwl_mvm_sta *mvmsta,
2992 struct iwl_mvm_tid_data *tid_data)
2993{
2994 u16 txq_id = tid_data->txq_id;
2995
2996 lockdep_assert_held(&mvm->mutex);
2997
2998 if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through the SCD_QUEUE_CONFIG command, we can just mark
	 * it as free.
	 */
3008 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3009 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3010 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3011 }
3012}
3013
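/*
 * Stop a TX aggregation session gracefully, reverting the TID state and
 * notifying mac80211 once done.
 */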
3014int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3015 struct ieee80211_sta *sta, u16 tid)
3016{
3017 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3018 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3019 u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
3026 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3027 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3028 return 0;
3029 }
3030
3031 spin_lock_bh(&mvmsta->lock);
3032
3033 txq_id = tid_data->txq_id;
3034
3035 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3036 mvmsta->sta_id, tid, txq_id, tid_data->state);
3037
3038 mvmsta->agg_tids &= ~BIT(tid);
3039
3040 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3041
3042 switch (tid_data->state) {
3043 case IWL_AGG_ON:
3044 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3045
3046 IWL_DEBUG_TX_QUEUES(mvm,
3047 "ssn = %d, next_recl = %d\n",
3048 tid_data->ssn, tid_data->next_reclaimed);
3049
3050 tid_data->ssn = 0xffff;
3051 tid_data->state = IWL_AGG_OFF;
3052 spin_unlock_bh(&mvmsta->lock);
3053
3054 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3055
3056 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3057 return 0;
3058 case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
3066 lockdep_assert_held(&mvm->mutex);
3067
3068 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3069 tid_data->state = IWL_AGG_OFF;
3070 err = 0;
3071 break;
3072 default:
3073 IWL_ERR(mvm,
3074 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3075 mvmsta->sta_id, tid, tid_data->state);
3076 IWL_ERR(mvm,
3077 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3078 err = -EINVAL;
3079 }
3080
3081 spin_unlock_bh(&mvmsta->lock);
3082
3083 return err;
3084}
3085
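/*
 * Flush and stop a TX aggregation session: drain the AGG queue before
 * disabling aggregation in the firmware.
 */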
3086int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3087 struct ieee80211_sta *sta, u16 tid)
3088{
3089 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3090 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3091 u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
3098 spin_lock_bh(&mvmsta->lock);
3099 txq_id = tid_data->txq_id;
3100 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3101 mvmsta->sta_id, tid, txq_id, tid_data->state);
3102 old_state = tid_data->state;
3103 tid_data->state = IWL_AGG_OFF;
3104 mvmsta->agg_tids &= ~BIT(tid);
3105 spin_unlock_bh(&mvmsta->lock);
3106
3107 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3108
3109 if (old_state >= IWL_AGG_ON) {
3110 iwl_mvm_drain_sta(mvm, mvmsta, true);
3111
3112 if (iwl_mvm_has_new_tx_api(mvm)) {
3113 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3114 BIT(tid)))
3115 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3116 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3117 } else {
3118 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3119 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3120 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3121 }
3122
3123 iwl_mvm_drain_sta(mvm, mvmsta, false);
3124
3125 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3126 }
3127
3128 return 0;
3129}
3130
3131static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3132{
3133 int i, max = -1, max_offs = -1;
3134
	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
3143 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3144 if (test_bit(i, mvm->fw_key_table))
3145 continue;
3146 if (mvm->fw_key_deleted[i] > max) {
3147 max = mvm->fw_key_deleted[i];
3148 max_offs = i;
3149 }
3150 }
3151
3152 if (max_offs < 0)
3153 return STA_KEY_IDX_INVALID;
3154
3155 return max_offs;
3156}
3157
3158static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3159 struct ieee80211_vif *vif,
3160 struct ieee80211_sta *sta)
3161{
3162 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3163
3164 if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
3172 if (vif->type == NL80211_IFTYPE_STATION &&
3173 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3174 u8 sta_id = mvmvif->ap_sta_id;
3175
3176 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					   lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
3184 if (IS_ERR_OR_NULL(sta))
3185 return NULL;
3186
3187 return iwl_mvm_sta_from_mac80211(sta);
3188 }
3189
3190 return NULL;
3191}
3192
3193static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3194 u32 sta_id,
3195 struct ieee80211_key_conf *key, bool mcast,
3196 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3197 u8 key_offset, bool mfp)
3198{
3199 union {
3200 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3201 struct iwl_mvm_add_sta_key_cmd cmd;
3202 } u = {};
3203 __le16 key_flags;
3204 int ret;
3205 u32 status;
3206 u16 keyidx;
3207 u64 pn = 0;
3208 int i, size;
3209 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3210 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3211
3212 if (sta_id == IWL_MVM_INVALID_STA)
3213 return -EINVAL;
3214
3215 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3216 STA_KEY_FLG_KEYID_MSK;
3217 key_flags = cpu_to_le16(keyidx);
3218 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3219
3220 switch (key->cipher) {
3221 case WLAN_CIPHER_SUITE_TKIP:
3222 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3223 if (new_api) {
3224 memcpy((void *)&u.cmd.tx_mic_key,
3225 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3226 IWL_MIC_KEY_SIZE);
3227
3228 memcpy((void *)&u.cmd.rx_mic_key,
3229 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3230 IWL_MIC_KEY_SIZE);
3231 pn = atomic64_read(&key->tx_pn);
3232
3233 } else {
3234 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3235 for (i = 0; i < 5; i++)
3236 u.cmd_v1.tkip_rx_ttak[i] =
3237 cpu_to_le16(tkip_p1k[i]);
3238 }
3239 memcpy(u.cmd.common.key, key->key, key->keylen);
3240 break;
3241 case WLAN_CIPHER_SUITE_CCMP:
3242 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3243 memcpy(u.cmd.common.key, key->key, key->keylen);
3244 if (new_api)
3245 pn = atomic64_read(&key->tx_pn);
3246 break;
3247 case WLAN_CIPHER_SUITE_WEP104:
3248 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3249 fallthrough;
3250 case WLAN_CIPHER_SUITE_WEP40:
3251 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3252 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3253 break;
3254 case WLAN_CIPHER_SUITE_GCMP_256:
3255 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3256 fallthrough;
3257 case WLAN_CIPHER_SUITE_GCMP:
3258 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3259 memcpy(u.cmd.common.key, key->key, key->keylen);
3260 if (new_api)
3261 pn = atomic64_read(&key->tx_pn);
3262 break;
3263 default:
3264 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3265 memcpy(u.cmd.common.key, key->key, key->keylen);
3266 }
3267
3268 if (mcast)
3269 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3270 if (mfp)
3271 key_flags |= cpu_to_le16(STA_KEY_MFP);
3272
3273 u.cmd.common.key_offset = key_offset;
3274 u.cmd.common.key_flags = key_flags;
3275 u.cmd.common.sta_id = sta_id;
3276
3277 if (new_api) {
3278 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3279 size = sizeof(u.cmd);
3280 } else {
3281 size = sizeof(u.cmd_v1);
3282 }
3283
3284 status = ADD_STA_SUCCESS;
3285 if (cmd_flags & CMD_ASYNC)
3286 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3287 &u.cmd);
3288 else
3289 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3290 &u.cmd, &status);
3291
3292 switch (status) {
3293 case ADD_STA_SUCCESS:
3294 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3295 break;
3296 default:
3297 ret = -EIO;
3298 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3299 break;
3300 }
3301
3302 return ret;
3303}
3304
3305static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3306 struct ieee80211_key_conf *keyconf,
3307 u8 sta_id, bool remove_key)
3308{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
3312 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3313 (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3314 keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3315 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3316 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3317 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3318 return -EINVAL;
3319
3320 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3321 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3322 return -EINVAL;
3323
3324 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3325 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3326
	if (remove_key) {
		/* This is a valid situation for IGTK */
		if (sta_id == IWL_MVM_INVALID_STA)
3330 return 0;
3331
3332 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3333 } else {
3334 struct ieee80211_key_seq seq;
3335 const u8 *pn;
3336
3337 switch (keyconf->cipher) {
3338 case WLAN_CIPHER_SUITE_AES_CMAC:
3339 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3340 break;
3341 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3342 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3343 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3344 break;
3345 default:
3346 return -EINVAL;
3347 }
3348
3349 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3350 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3351 igtk_cmd.ctrl_flags |=
3352 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3353 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3354 pn = seq.aes_cmac.pn;
3355 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3356 ((u64) pn[4] << 8) |
3357 ((u64) pn[3] << 16) |
3358 ((u64) pn[2] << 24) |
3359 ((u64) pn[1] << 32) |
3360 ((u64) pn[0] << 40));
3361 }
3362
3363 IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3364 remove_key ? "removing" : "installing",
3365 keyconf->keyidx >= 6 ? "B" : "",
3366 keyconf->keyidx, igtk_cmd.sta_id);
3367
3368 if (!iwl_mvm_has_new_rx_api(mvm)) {
3369 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3370 .ctrl_flags = igtk_cmd.ctrl_flags,
3371 .key_id = igtk_cmd.key_id,
3372 .sta_id = igtk_cmd.sta_id,
3373 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3374 };
3375
3376 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3377 ARRAY_SIZE(igtk_cmd_v1.igtk));
3378 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3379 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3380 }
3381 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3382 sizeof(igtk_cmd), &igtk_cmd);
}

/*
 * Get the MAC address used for TKIP phase-1 key lookup: the station's own
 * address, or the AP station's address for a GTK on a station interface.
 */
3386static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3387 struct ieee80211_vif *vif,
3388 struct ieee80211_sta *sta)
3389{
3390 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3391
3392 if (sta)
3393 return sta->addr;
3394
3395 if (vif->type == NL80211_IFTYPE_STATION &&
3396 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3397 u8 sta_id = mvmvif->ap_sta_id;
3398 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3399 lockdep_is_held(&mvm->mutex));
3400 return sta->addr;
	}

	return NULL;
3405}
3406
3407static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3408 struct ieee80211_vif *vif,
3409 struct ieee80211_sta *sta,
3410 struct ieee80211_key_conf *keyconf,
3411 u8 key_offset,
3412 bool mcast)
3413{
3414 int ret;
3415 const u8 *addr;
3416 struct ieee80211_key_seq seq;
3417 u16 p1k[5];
3418 u32 sta_id;
3419 bool mfp = false;
3420
3421 if (sta) {
3422 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3423
3424 sta_id = mvm_sta->sta_id;
3425 mfp = sta->mfp;
3426 } else if (vif->type == NL80211_IFTYPE_AP &&
3427 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3428 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3429
3430 sta_id = mvmvif->mcast_sta.sta_id;
3431 } else {
3432 IWL_ERR(mvm, "Failed to find station id\n");
3433 return -EINVAL;
3434 }
3435
3436 switch (keyconf->cipher) {
3437 case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3441 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3442 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3443 seq.tkip.iv32, p1k, 0, key_offset,
3444 mfp);
3445 break;
3446 case WLAN_CIPHER_SUITE_CCMP:
3447 case WLAN_CIPHER_SUITE_WEP40:
3448 case WLAN_CIPHER_SUITE_WEP104:
3449 case WLAN_CIPHER_SUITE_GCMP:
3450 case WLAN_CIPHER_SUITE_GCMP_256:
3451 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3452 0, NULL, 0, key_offset, mfp);
3453 break;
3454 default:
3455 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3456 0, NULL, 0, key_offset, mfp);
3457 }
3458
3459 return ret;
3460}
3461
3462int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3463 struct ieee80211_vif *vif,
3464 struct ieee80211_sta *sta,
3465 struct ieee80211_key_conf *keyconf,
3466 u8 key_offset)
3467{
3468 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3469 struct iwl_mvm_sta *mvm_sta;
3470 u8 sta_id = IWL_MVM_INVALID_STA;
3471 int ret;
3472 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3473
3474 lockdep_assert_held(&mvm->mutex);
3475
3476 if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3480 if (!mvm_sta) {
3481 IWL_ERR(mvm, "Failed to find station\n");
3482 return -EINVAL;
3483 }
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
3491 if (!sta) {
3492 sta = rcu_dereference_protected(
3493 mvm->fw_id_to_mac_id[sta_id],
3494 lockdep_is_held(&mvm->mutex));
3495 if (IS_ERR_OR_NULL(sta)) {
3496 IWL_ERR(mvm, "Invalid station id\n");
3497 return -EINVAL;
3498 }
3499 }
3500
3501 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3502 return -EINVAL;
3503 } else {
3504 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3505
3506 sta_id = mvmvif->mcast_sta.sta_id;
3507 }
3508
3509 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3510 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3511 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3512 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3513 goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
3527 if (key_offset == STA_KEY_IDX_INVALID) {
3528 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3529 if (key_offset == STA_KEY_IDX_INVALID)
3530 return -ENOSPC;
3531 keyconf->hw_key_idx = key_offset;
3532 }
3533
3534 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3535 if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * (mcast or ucast) to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
3544 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3545 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3546 sta) {
3547 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3548 key_offset, !mcast);
3549 if (ret) {
3550 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3551 goto end;
3552 }
3553 }
3554
3555 __set_bit(key_offset, mvm->fw_key_table);
3556
3557end:
3558 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3559 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3560 sta ? sta->addr : zero_addr, ret);
3561 return ret;
3562}
3563
3564int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3565 struct ieee80211_vif *vif,
3566 struct ieee80211_sta *sta,
3567 struct ieee80211_key_conf *keyconf)
3568{
3569 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3570 struct iwl_mvm_sta *mvm_sta;
3571 u8 sta_id = IWL_MVM_INVALID_STA;
3572 int ret, i;
3573
	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3578 if (mvm_sta)
3579 sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

3584 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3585 keyconf->keyidx, sta_id);
3586
3587 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3588 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3589 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3590 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3591
3592 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3593 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3594 keyconf->hw_key_idx);
3595 return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3600 if (mvm->fw_key_deleted[i] < U8_MAX)
3601 mvm->fw_key_deleted[i]++;
3602 }
3603 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3604
3605 if (sta && !mvm_sta) {
3606 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3607 return 0;
3608 }
3609
3610 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3611 if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3616 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3617 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3618
3619 return ret;
3620}
3621
3622void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3623 struct ieee80211_vif *vif,
3624 struct ieee80211_key_conf *keyconf,
3625 struct ieee80211_sta *sta, u32 iv32,
3626 u16 *phase1key)
3627{
3628 struct iwl_mvm_sta *mvm_sta;
3629 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3630 bool mfp = sta ? sta->mfp : false;
3631
3632 rcu_read_lock();
3633
3634 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3635 if (WARN_ON_ONCE(!mvm_sta))
3636 goto unlock;
3637 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3638 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3639 mfp);
3640
3641 unlock:
3642 rcu_read_unlock();
3643}
3644
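/* Tell the firmware that the given station woke up from power save */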
3645void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3646 struct ieee80211_sta *sta)
3647{
3648 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3649 struct iwl_mvm_add_sta_cmd cmd = {
3650 .add_modify = STA_MODE_MODIFY,
3651 .sta_id = mvmsta->sta_id,
3652 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3653 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3654 };
3655 int ret;
3656
3657 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3658 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3659 if (ret)
3660 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3661}
3662
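/*
 * Release frames to a station in power save: compute how many frames can
 * actually be released on its queues and update the firmware's sleeping
 * station TX count accordingly.
 */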
3663void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3664 struct ieee80211_sta *sta,
3665 enum ieee80211_frame_release_type reason,
3666 u16 cnt, u16 tids, bool more_data,
3667 bool single_sta_queue)
3668{
3669 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3670 struct iwl_mvm_add_sta_cmd cmd = {
3671 .add_modify = STA_MODE_MODIFY,
3672 .sta_id = mvmsta->sta_id,
3673 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3674 .sleep_tx_count = cpu_to_le16(cnt),
3675 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3676 };
3677 int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
3694 if (single_sta_queue) {
3695 int remaining = cnt;
3696 int sleep_tx_count;
3697
3698 spin_lock_bh(&mvmsta->lock);
3699 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3700 struct iwl_mvm_tid_data *tid_data;
3701 u16 n_queued;
3702
3703 tid_data = &mvmsta->tid_data[tid];
3704
3705 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3706 if (n_queued > remaining) {
3707 more_data = true;
3708 remaining = 0;
3709 break;
3710 }
3711 remaining -= n_queued;
3712 }
3713 sleep_tx_count = cnt - remaining;
3714 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3715 mvmsta->sleep_tx_count = sleep_tx_count;
3716 spin_unlock_bh(&mvmsta->lock);
3717
3718 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3719 if (WARN_ON(cnt - remaining == 0)) {
3720 ieee80211_sta_eosp(sta);
3721 return;
3722 }
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
3727 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3728
3729 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3730 mvmsta->next_status_eosp = true;
3731 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3732 } else {
3733 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);
3738
3739 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3740 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3741 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3742 if (ret)
3743 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3744}
3745
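/* Forward an end-of-service-period notification from firmware to mac80211 */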
3746void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3747 struct iwl_rx_cmd_buffer *rxb)
3748{
3749 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3750 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3751 struct ieee80211_sta *sta;
3752 u32 sta_id = le32_to_cpu(notif->sta_id);
3753
3754 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
3755 return;
3756
3757 rcu_read_lock();
3758 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3759 if (!IS_ERR_OR_NULL(sta))
3760 ieee80211_sta_eosp(sta);
3761 rcu_read_unlock();
3762}
3763
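/* Block or unblock TX to the given station in the firmware */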
3764void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3765 struct iwl_mvm_sta *mvmsta, bool disable)
3766{
3767 struct iwl_mvm_add_sta_cmd cmd = {
3768 .add_modify = STA_MODE_MODIFY,
3769 .sta_id = mvmsta->sta_id,
3770 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3771 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3772 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3773 };
3774 int ret;
3775
3776 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3777 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3778 if (ret)
3779 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3780}
3781
3782void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3783 struct ieee80211_sta *sta,
3784 bool disable)
3785{
3786 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3787
3788 spin_lock_bh(&mvm_sta->lock);
3789
3790 if (mvm_sta->disable_tx == disable) {
3791 spin_unlock_bh(&mvm_sta->lock);
3792 return;
3793 }
3794
	mvm_sta->disable_tx = disable;

	/*
	 * If sta PS state is handled by mac80211, tell it to start/stop
	 * queuing tx for this station.
	 */
3801 if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
3802 ieee80211_sta_block_awake(mvm->hw, sta, disable);
3803
3804 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3805
3806 spin_unlock_bh(&mvm_sta->lock);
3807}
3808
3809static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3810 struct iwl_mvm_vif *mvmvif,
3811 struct iwl_mvm_int_sta *sta,
3812 bool disable)
3813{
3814 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3815 struct iwl_mvm_add_sta_cmd cmd = {
3816 .add_modify = STA_MODE_MODIFY,
3817 .sta_id = sta->sta_id,
3818 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3819 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3820 .mac_id_n_color = cpu_to_le32(id),
3821 };
3822 int ret;
3823
3824 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3825 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3826 if (ret)
3827 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3828}
3829
3830void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3831 struct iwl_mvm_vif *mvmvif,
3832 bool disable)
3833{
3834 struct ieee80211_sta *sta;
3835 struct iwl_mvm_sta *mvm_sta;
3836 int i;
3837
	rcu_read_lock();

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
3842 sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
3843 if (IS_ERR_OR_NULL(sta))
3844 continue;
3845
3846 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3847 if (mvm_sta->mac_id_n_color !=
3848 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3849 continue;
3850
3851 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3852 }
3853
3854 rcu_read_unlock();
3855
3856 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3861 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
3868 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3869 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3870 &mvmvif->bcast_sta, disable);
3871}
3872
3873void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3874{
3875 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3876 struct iwl_mvm_sta *mvmsta;
3877
3878 rcu_read_lock();
3879
3880 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3881
3882 if (!WARN_ON(!mvmsta))
3883 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3884
3885 rcu_read_unlock();
3886}
3887
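/* Return the number of frames still queued for the given TID */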
3888u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3889{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * On gen2 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
3896 if (mvm->trans->trans_cfg->gen2)
3897 sn &= 0xff;
3898
3899 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
3900}
3901
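/*
 * Add an internal station used for secure ranging (PASN): allocate the
 * station with a queue and install the given key for it.
 */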
3902int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3903 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
3904 u8 *key, u32 key_len)
3905{
3906 int ret;
3907 u16 queue;
3908 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3909 struct ieee80211_key_conf *keyconf;
3910
3911 ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
3912 NL80211_IFTYPE_UNSPECIFIED,
3913 IWL_STA_LINK);
3914 if (ret)
3915 return ret;
3916
3917 ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
3918 addr, sta, &queue,
3919 IWL_MVM_TX_FIFO_BE);
3920 if (ret)
3921 goto out;
3922
3923 keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
3924 if (!keyconf) {
3925 ret = -ENOBUFS;
3926 goto out;
3927 }
3928
3929 keyconf->cipher = cipher;
3930 memcpy(keyconf->key, key, key_len);
3931 keyconf->keylen = key_len;
3932
	ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
				   0, NULL, 0, 0, true);
	kfree(keyconf);
	if (ret)
		goto out;

	return 0;
3937out:
3938 iwl_mvm_dealloc_int_sta(mvm, sta);
3939 return ret;
3940}
3941