/*
 * HT handling - TX side A-MPDU (BlockAck) aggregation session setup and
 * teardown for mac80211.
 */
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "wme.h"

/**
 * DOC: TX A-MPDU aggregation
 *
 * Aggregation on the TX side requires setting the hardware flag
 * %IEEE80211_HW_AMPDU_AGGREGATION. The driver is then handed packets
 * flagged for A-MPDU aggregation and is responsible for actually
 * aggregating them, as well as for deciding how many and which frames
 * to aggregate.
 *
 * TX aggregation is started by some subsystem (usually the rate control
 * algorithm) calling ieee80211_start_tx_ba_session(); the driver is then
 * notified via its @ampdu_action callback with the
 * %IEEE80211_AMPDU_TX_START action and must later call
 * ieee80211_start_tx_ba_cb_irqsafe(). The session only becomes
 * operational once the peer has also responded positively, which the
 * driver learns about through the %IEEE80211_AMPDU_TX_OPERATIONAL
 * action. If the peer declines, the session is torn down again.
 *
 * When the session is stopped, by the peer or by a call to
 * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action callback is
 * invoked with one of the %IEEE80211_AMPDU_TX_STOP_* actions and the
 * driver must later call ieee80211_stop_tx_ba_cb_irqsafe(). Note that
 * the station can be destroyed before the BlockAck teardown completes.
 */

static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 0);		/* bit 0: A-MSDU support */
	capab |= (u16)(1 << 1);		/* bit 1: aggregation policy (immediate BA) */
	capab |= (u16)(tid << 2);	/* bits 5:2: TID number */
	capab |= (u16)(agg_size << 6);	/* bits 15:6: max aggregation buffer size */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb(sdata, skb);
}
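
/*
 * Build and transmit a BlockAckReq control frame to @ra for the given
 * TID, asking the recipient to move its BlockAck window to @ssn. TX
 * status reporting is requested so the caller can react if the BAR is
 * not acknowledged; encryption is explicitly skipped for this frame.
 */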
void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_bar *bar;
	u16 bar_control = 0;

	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
	memset(bar, 0, sizeof(*bar));
	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_BACK_REQ);
	memcpy(bar->ra, ra, ETH_ALEN);
	memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
	bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
	bar->control = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
					IEEE80211_TX_CTL_REQ_TX_STATUS;
	ieee80211_tx_skb_tid(sdata, skb, tid);
}
EXPORT_SYMBOL(ieee80211_send_bar);

void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}

/*
 * When multiple aggregation sessions on multiple stations
 * are being created/destroyed simultaneously, we need to
 * refcount the global queue stop caused by that in order
 * to not get into a situation where one of the aggregation
 * settings is torn down even though a different one is still
 * operational.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	/* we do refcounting here, so don't use the queue reason refcounting */

	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__acquire(agg_queue);
}

static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
		ieee80211_wake_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__release(agg_queue);
}

static void
ieee80211_agg_stop_txq(struct sta_info *sta, int tid)
{
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct txq_info *txqi;

	if (!txq)
		return;

	txqi = to_txq_info(txq);

	/* Lock here to protect against further seqno updates on dequeue */
	spin_lock_bh(&txqi->queue.lock);
	set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
	spin_unlock_bh(&txqi->queue.lock);
}

static void
ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
{
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct txq_info *txqi;

	if (!txq)
		return;

	txqi = to_txq_info(txq);

	if (enable)
		set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
	else
		clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);

	clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
	drv_wake_tx_queue(sta->sdata->local, txqi);
}

/*
 * splice packets from the STA's pending queue to the local pending queue;
 * requires a later call to ieee80211_agg_splice_finish()
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	struct ieee80211_local *local = sdata->local;
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
	unsigned long flags;

	ieee80211_stop_queue_agg(sdata, tid);

	if (WARN(!tid_tx,
		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
		 tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}

static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
	ieee80211_wake_queue_agg(sdata, tid);
}

static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * When we get here, the TX path will not be lockless any more wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on getting the lock, if it occurs. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be getting queued now will be added to the pending
	 * skb queue we just spliced, so they won't just leak out because
	 * the aggregation session is no longer running.
	 */
	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	ieee80211_assign_tid_tx(sta, tid, NULL);

	ieee80211_agg_splice_finish(sta->sdata, tid);
	ieee80211_agg_start_txq(sta, tid, false);

	kfree_rcu(tid_tx, rcu_head);
}
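
/*
 * Core TX BA teardown, called with ampdu_mlme.mtx held. Picks the
 * IEEE80211_AMPDU_TX_STOP_* action matching @reason, marks the session
 * as stopping, synchronizes with the TX path and then notifies the
 * driver that the session is going back to non-aggregated operation.
 */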
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	enum ieee80211_ampdu_mlme_action action;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * if we're already stopping, ignore any new requests to stop
	 * unless we're destroying the station, in which case notify
	 * the driver so it can flush remaining frames
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		ret = drv_ampdu_action(local, sta->sdata,
				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
				       &sta->sta, tid, NULL, 0, false);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this, packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX path calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, action,
			       &sta->sta, tid, NULL, 0, false);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	return 0;
}

/*
 * After sending an AddBA request we activate a timer until the AddBA
 * response arrives from the recipient. If that timer expires,
 * sta_addba_resp_timer_expired() tears the pending session back down.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both sta_info and TID are needed, so the
	 * init flow gives the TID as data while the timer_to_tid array
	 * yields the sta through container_of */
	u16 tid = *(u8 *)data;
	struct sta_info *sta = container_of((void *)data,
					    struct sta_info, timer_to_tid[tid]);
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID is still waiting for an addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		rcu_read_unlock();
		ht_dbg(sta->sdata,
		       "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
		       sta->sta.addr, tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}
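
/*
 * Called from the aggregation worker once a session has been requested:
 * stop the TXQ for this TID, hand IEEE80211_AMPDU_TX_START to the
 * driver, and if that succeeds send the AddBA request and arm the
 * response timer; otherwise tear the pending session state back down.
 */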
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is fully
	 * ready to aggregate.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num, 0, false);
	if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		ieee80211_agg_start_txq(sta, tid, false);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send the AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     IEEE80211_MAX_AMPDU_BUF,
				     tid_tx->timeout);
}

/*
 * After accepting the AddBA Response we activate a session timer,
 * rearming it after each frame that we send.
 */
static void sta_tx_agg_session_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument; the timer_to_tid array gives us the TID and
	 * the sta through container_of */
	u8 *ptid = (u8 *)data;
	u8 *timer_to_id = ptid - *ptid;
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					    timer_to_tid[0]);
	struct tid_ampdu_tx *tid_tx;
	unsigned long timeout;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		rcu_read_unlock();
		return;
	}

	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
	       sta->sta.addr, (u16)*ptid);

	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
}

int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN(sta->reserved_tid == tid,
		 "Requested to start BA session on reserved tid=%d", tid))
		return -EINVAL;

	if (!pubsta->ht_cap.ht_supported)
		return -EINVAL;

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
	    ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * In an IBSS, only set up a Block Ack agreement with peers that
	 * have advertised HT support; the probe request mechanism that
	 * 802.11n describes for ADDBA towards unknown IBSS peers is not
	 * implemented, so simply refuse the request otherwise.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling the
	 * connection for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;

	/* response timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* tx session timer */
	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer_deferrable(&tid_tx->session_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the aggregation work
	 * item will collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
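
/*
 * Both the driver and the peer have confirmed the session: tell the
 * driver the negotiated buffer size and A-MSDU support, splice any
 * frames queued during setup back onto the normal TX path, and mark
 * the session OPERATIONAL so the fast path can use it lock-free.
 */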
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata,
			 IEEE80211_AMPDU_TX_OPERATIONAL,
			 &sta->sta, tid, NULL, tid_tx->buf_size,
			 tid_tx->amsdu);

	/*
	 * synchronize with TX path, while splicing the TX path
	 * should block so it won't put more packets onto pending
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);

	ieee80211_agg_start_txq(sta, tid, true);
}
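
/*
 * Driver-side completion of IEEE80211_AMPDU_TX_START: mark the driver
 * as ready and, if the peer's AddBA response has already been received,
 * move the session into the operational state.
 */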
void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	mutex_lock(&local->sta_mtx);
	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		mutex_unlock(&local->sta_mtx);
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (WARN_ON(!tid_tx)) {
		ht_dbg(sdata, "addBA was not requested!\n");
		goto unlock;
	}

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto unlock;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
	mutex_unlock(&sta->ampdu_mlme.mtx);
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}
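
/*
 * Exported API: request that the TX BA session for @tid be torn down.
 * This only flags the session as WANT_STOP and kicks the aggregation
 * worker; the actual teardown happens from the work item.
 */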
int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= IEEE80211_NUM_TIDS)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	WARN(sta->reserved_tid == tid,
	     "Requested to stop BA session on reserved tid=%d", tid);

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in progress stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);

void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;
	bool send_delba = false;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);

	mutex_lock(&local->sta_mtx);

	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		goto unlock;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sdata,
		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
		       sta->sta.addr, tid);
		goto unlock_sta;
	}

	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
		send_delba = true;

	ieee80211_remove_tid_tx(sta, tid);

 unlock_sta:
	spin_unlock_bh(&sta->lock);

	if (send_delba)
		ieee80211_send_delba(sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	mutex_unlock(&sta->ampdu_mlme.mtx);
 unlock:
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
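
/*
 * Handle the peer's AddBA response: validate the dialog token, stop the
 * response timer and, if the response is successful with a non-zero
 * buffer size, record the negotiated parameters and (once the driver is
 * ready) switch the session to the operational state; otherwise tear
 * the session down as declined.
 */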
void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	u16 capab, tid;
	u8 buf_size;
	bool amsdu;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
		       sta->sta.addr, tid);
		goto out;
	}

	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
	       sta->sta.addr, tid);

	/*
	 * addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If the stop was then already
	 * processed further, STOPPING might be set.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for %pM tid %d but we already gave up\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/*
	 * A successful ADDBA Response must advertise a buffer size of at
	 * least 1, so treat success with a zero buffer size as a decline.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;
		tid_tx->amsdu = amsdu;

		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;

		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}