// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * Newer versions of the ADD_STA command added fields at the end of the
 * command struct; older firmware expects the shorter v7 layout, so pick
 * the command size that matches the API the firmware advertises.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

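/*
 * Station IDs index both the driver's fw_id_to_mac_id[] table and the
 * firmware's station table; ID 0 is kept for the AP station of a managed
 * interface, so all other interface types skip it below.
 */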
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (to flush TX) is the same */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = le16_get_bits(sta->he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(sta->he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	/*
	 * A STA advertises the maximum A-MPDU length exponent it can receive
	 * in its HT/VHT capabilities (or HE 6 GHz band capabilities), and an
	 * HE STA may additionally extend it via the Maximum A-MPDU Length
	 * Exponent Extension field of its HE capabilities element.
	 */
	if (sta->he_cap.has_he)
		agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	/* limit to max A-MPDU supported by FW */
	if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
		agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
			    STA_FLG_MAX_AGG_SIZE_SHIFT);

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

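/*
 * Rx BA session watchdog: the firmware doesn't time out Rx BA sessions on
 * its own, so the driver re-arms this timer from the Rx path and, once
 * twice the negotiated timeout passes with no frames, lets mac80211 tear
 * the session down via ieee80211_rx_ba_timer_expired().
 */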
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Tell the firmware to stop using a queue, and/or to remove it entirely */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

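/*
 * Disabling a queue takes two very different paths: with the new (TVQM)
 * TX API the transport simply frees the firmware-allocated queue, while
 * on older hardware the shared SCD queue bookkeeping below has to be
 * unwound by hand before the SCD_QUEUE_CFG command is sent.
 */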
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       u16 *queueptr, u8 tid, u8 flags)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return 0;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* The queue is now disabled - forget its TID bookkeeping */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);

	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue);
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ id from the tid_data,
	 * so make sure it's no longer running, so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Without
	 * this, we could reach iwl_mvm_disable_txq() and remove the queue
	 * while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

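/*
 * Free an inactive queue for reuse: tear down any aggregation state on
 * it, disable it, and tell the firmware to drop it from the old station,
 * unless that same station is the one about to receive it again.
 */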
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE, so the only AC lower than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection required - it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected
	 * to the lowest one of the streams in the queue. Check if this is
	 * needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is
	 * with value 3 and VO with value 0, so to check if ac X is lower
	 * than ac Y we need to check if the numerical value of X is LARGER
	 * than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

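/*
 * On the TVQM path the firmware owns queue allocation: ask the transport
 * for a queue of the wanted size and, if that fails, retry with
 * progressively smaller sizes (halving down to 16 entries) before
 * giving up.
 */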
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
				mvm->trans->cfg->min_256_ba_txq_size);

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	}

	do {
		__le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);

		queue = iwl_trans_txq_alloc(mvm->trans, enable,
					    sta_id, tid, SCD_QUEUE_CFG,
					    size, timeout);

		if (queue < 0)
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
					    size, sta_id, tid, queue);
		size /= 2;
	} while (queue < 0 && size >= 16);

	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

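/*
 * Update the driver's queue <-> RA/TID bookkeeping for a pre-TVQM queue.
 * Returns true if the mapping means the hardware queue must actually be
 * enabled, i.e. this is the first TID mapped onto it.
 */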
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

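/*
 * When the TID that "owns" a shared queue is removed from it, ownership
 * is handed to one of the remaining TIDs by re-sending the SCD config
 * with the new owner TID and its matching FIFO.
 */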
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Clear the bitmap of any TID that shouldn't be removed */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the
		 * TXQ, ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue
		 * can't be allocated for it as long as it is an owner of
		 * another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and
		 * is in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It
		 * could be that it was removed since getting the queues, and
		 * in this case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

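/*
 * Allocate a TX queue for a station/TID pair. On pre-TVQM hardware this
 * walks a fallback chain: a free MGMT or DATA queue, the station's
 * reserved queue, a queue reclaimed by the inactivity checker, and
 * finally sharing an existing DATA queue of a suitable AC.
 */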
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes this queue in the meantime (shared
	 * queues were already marked IWL_MVM_QUEUE_SHARED above).
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the
	 * queue as aggregatable - so mark all DATA queues (and the BSS
	 * client queue) as allowing aggregation at some point.
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);

	return ret;
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * If queue allocation fails there is not much we can do:
		 * drop the TXQ from the list without transmitting; a later
		 * TX attempt on this TXQ will queue it here again.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			list_del_init(&mvmtxq->list);
			continue;
		}

		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before,
 * in order to avoid race conditions when there are shared queues. This
 * function does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * and try again later (a new TX on this TID will
			 * allocate a queue through the usual path)
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and
			 * the HW starts counting from 0 again after the
			 * restart, reset the driver's copy so it stays in
			 * sync with the real value.
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

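/*
 * Send ADD_STA for an internal (driver-only) station such as the aux,
 * sniffer, broadcast or multicast station. These have no mac80211
 * counterpart, and TX is disabled on all their TIDs by default.
 */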
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) >= 12 &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->trans_cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* no aggregation on any TID until BA sessions are started */
	mvm_sta->tid_disable_agg = 0xffff;
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;

		/*
		 * Initialize all the last_seq values to 0xffff which can
		 * never compare equal to the frame's seq_ctrl in the
		 * duplicate check, since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that check.
		 * This allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * With TLC offload the firmware does the rate scaling, so register
	 * the station with it; otherwise just initialize the lock protecting
	 * the driver's rate-scaling state.
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
				    0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

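/*
 * Station removal is ordered carefully: drain and flush the station's
 * frames, wait for its queues to empty, disable them, and only then
 * remove the station from the firmware and clear the RCU pointer.
 */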
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}

static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
				       wdg_timeout);
}

static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
					  int maccolor, u8 *addr,
					  struct iwl_mvm_int_sta *sta,
					  u16 *queue, int fifo)
{
	int ret;

	/* Map queue to fifo - needs to happen before adding station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);

	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
	if (ret) {
		if (!iwl_mvm_has_new_tx_api(mvm))
			iwl_mvm_disable_txq(mvm, NULL, queue,
					    IWL_MAX_TID_COUNT, 0);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int txq;

		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
		if (txq < 0) {
			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
			return txq;
		}

		*queue = txq;
	}

	return 0;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/*
	 * In CDB NICs we need to specify which lmac to use for aux activity;
	 * the mac_id argument place is used to carry the lmac_id.
	 */
	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
					     &mvm->aux_sta, &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					      NULL, &mvm->snif_sta,
					      &mvm->snif_queue,
					      IWL_MVM_TX_FIFO_BE);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			queue = mvm->probe_queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			queue = mvm->p2p_dev_queue;
		} else {
			WARN(1, "Missing required TXQ for adding bcast STA\n");
			return -EINVAL;
		}

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		if (queue < 0) {
			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
			return queue;
		}

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 *queueptr, queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queueptr = &mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queueptr = &mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	queue = *queueptr;
	iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/*
 * Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

2289
2290
2291
2292
2293
2294
2295
2296
2297int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2298{
2299 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2300 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2301 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2302 const u8 *maddr = _maddr;
2303 struct iwl_trans_txq_scd_cfg cfg = {
2304 .fifo = vif->type == NL80211_IFTYPE_AP ?
2305 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2306 .sta_id = msta->sta_id,
2307 .tid = 0,
2308 .aggregate = false,
2309 .frame_limit = IWL_FRAME_LIMIT,
2310 };
2311 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2312 int ret;
2313
2314 lockdep_assert_held(&mvm->mutex);
2315
2316 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2317 vif->type != NL80211_IFTYPE_ADHOC))
2318 return -ENOTSUPP;
2319
2320
2321
2322
2323
2324
2325
2326 if (vif->type == NL80211_IFTYPE_ADHOC)
2327 mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2328
2329
2330
2331
2332
2333 if (!iwl_mvm_has_new_tx_api(mvm) &&
2334 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2335 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2336 timeout);
2337 msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2338 }
2339 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2340 mvmvif->id, mvmvif->color);
2341 if (ret)
2342 goto err;
2343
2344
2345
2346
2347
2348
2349
2350
2351 if (iwl_mvm_has_new_tx_api(mvm)) {
2352 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2353 0,
2354 timeout);
2355 if (queue < 0) {
2356 ret = queue;
2357 goto err;
2358 }
2359 mvmvif->cab_queue = queue;
2360 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2361 IWL_UCODE_TLV_API_STA_TYPE))
2362 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2363 timeout);
2364
2365 return 0;
2366err:
2367 iwl_mvm_dealloc_int_sta(mvm, msta);
2368 return ret;
2369}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);

	iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_data notif = {
		.baid = baid,
	};

	iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
					&notif, sizeof(notif));
}
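
/*
 * The delBA notification is fanned out to every RX queue, and the sync flag
 * makes the call wait until each queue has processed it; the intent is that
 * by the time iwl_mvm_free_reorder() below runs, no RX path is still working
 * on frames that reference this BAID's reorder buffer.
 */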

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames
		 * in the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);

		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;

		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}
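
/*
 * Each RX queue gets its own reorder buffer and its own slice of the entries
 * array: queue i owns entries[i * entries_per_queue] onwards. The slices are
 * cache-line aligned (see the ALIGN() in iwl_mvm_sta_rx_agg() below) so that
 * queues running on different CPUs don't share cache lines.
 */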

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire
		 * cache line for each queue, to avoid sharing cache lines
		 * between different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);

		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data
		 * while RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);

		/*
		 * After we've deleted it, do another queue sync so that any
		 * notification still in flight that references this BAID is
		 * handled before the freed data could be reused.
		 */
		iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
						true, NULL, 0);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
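
/*
 * Rough sizing sketch for the allocation above: with a 64-byte cache line
 * and a (hypothetical) 32-byte reorder entry, a buf_size of 64 gives
 * ALIGN(64 * 32, 64) = 2048 bytes per RX queue and entries_per_queue = 64,
 * for a total of sizeof(*baid_data) + num_rx_queues * 2048 bytes. The exact
 * numbers depend on sizeof(baid_data->entries[0]) on the build.
 */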

int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
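
/*
 * Both tables follow the IEEE 802.11 UP-to-AC mapping: TIDs 0 and 3 map to
 * best effort, 1 and 2 to background, 4 and 5 to video, and 6 and 7 to
 * voice. Only the mac80211 table has the extra ninth entry, since the ucode
 * table is only ever indexed by real QoS TIDs (0-7).
 */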

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *     it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
			tid, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
	}

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manually, so this function should never get called in that case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is such a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/*
	 * Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (pn1[i] > pn2[i])
			return 1;
		if (pn1[i] < pn2[i])
			return -1;
	}

	return 0;
}
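
/*
 * iwl_mvm_pn_cmp() treats the byte at index len - 1 as the most significant
 * and returns a strcmp()-style result. It is used below to keep, across all
 * TIDs of a key, whichever RX sequence counter compares largest, so the
 * firmware is programmed with the maximal PN seen on any TID.
 */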

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
					    ADD_STA_KEY,
					    new_api ? 2 : 1);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (api_ver >= 2) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);
		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (api_ver >= 2)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		fallthrough;
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		fallthrough;
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (api_ver >= 2)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
		i = 0;
	else
		i = -1;

	for (; i < IEEE80211_NUM_TIDS; i++) {
		struct ieee80211_key_seq seq = {};
		u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
		int rx_pn_len = 8;
		/* there's a hole at 2/3 in FW format depending on version */
		int hole = api_ver >= 3 ? 0 : 2;

		ieee80211_get_key_rx_seq(key, i, &seq);

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			rx_pn[0] = seq.tkip.iv16;
			rx_pn[1] = seq.tkip.iv16 >> 8;
			rx_pn[2 + hole] = seq.tkip.iv32;
			rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
			rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
			rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
		} else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
			rx_pn = seq.hw.seq;
			rx_pn_len = seq.hw.seq_len;
		} else {
			rx_pn[0] = seq.ccmp.pn[0];
			rx_pn[1] = seq.ccmp.pn[1];
			rx_pn[2 + hole] = seq.ccmp.pn[2];
			rx_pn[3 + hole] = seq.ccmp.pn[3];
			rx_pn[4 + hole] = seq.ccmp.pn[4];
			rx_pn[5 + hole] = seq.ccmp.pn[5];
		}

		if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
				   rx_pn_len) > 0)
			memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
			       rx_pn_len);
	}

	if (api_ver >= 2) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
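
/*
 * The union above works because the v1 and current ADD_STA_KEY layouts share
 * their leading "common" fields, so those can be filled through u.cmd no
 * matter which size is ultimately sent. The command version looked up from
 * the firmware then picks between the v1 and full layout, and api_ver >= 3
 * additionally drops the 2-byte hole in the RX PN encoding.
 */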

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
		     keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		/* This is a valid situation for IGTK */
		if (sta_id == IWL_MVM_INVALID_STA)
			return 0;

		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
		       remove_key ? "removing" : "installing",
		       keyconf->keyidx >= 6 ? "B" : "",
		       keyconf->keyidx, igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
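
/*
 * Note the byte order above: seq.aes_cmac.pn[] from mac80211 holds the most
 * significant byte first, so pn[5] lands in the low byte of the little-endian
 * receive_seq_cnt. Key indices 4/5 carry the IGTK and, where supported, 6/7
 * carry the beacon protection key (BIGTK), which is why the debug message
 * prefixes those with "B".
 */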

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);

		return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					    seq.tkip.iv32, p1k, 0, key_offset,
					    mfp);
	}

	return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
				    0, NULL, 0, key_offset, mfp);
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * the station needs to be looked up in the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * If sta PS state is handled by mac80211, tell it to start/stop
	 * queuing tx for this station.
	 */
	if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	rcu_read_lock();

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	rcu_read_unlock();

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->trans_cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
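
/*
 * ieee80211_sn_sub() works modulo the 12-bit sequence number space, so the
 * result above is correct across wraparound: with sn = 5 and next_reclaimed =
 * 4094, (5 - 4094) & 0xfff = 7 frames still queued. On gen2 the masking keeps
 * sn in the same 8-bit window the hardware uses for next_reclaimed.
 */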

int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
			 u8 *key, u32 key_len)
{
	int ret;
	u16 queue;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_key_conf *keyconf;

	ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_LINK);
	if (ret)
		return ret;

	ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					     addr, sta, &queue,
					     IWL_MVM_TX_FIFO_BE);
	if (ret)
		goto out;

	keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
	if (!keyconf) {
		ret = -ENOBUFS;
		goto out;
	}

	keyconf->cipher = cipher;
	memcpy(keyconf->key, key, key_len);
	keyconf->keylen = key_len;

	ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
				   0, NULL, 0, 0, true);
	kfree(keyconf);
	return ret;
out:
	iwl_mvm_dealloc_int_sta(mvm, sta);
	return ret;
}