1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/ieee80211.h>
17#include <linux/slab.h>
18#include <linux/export.h>
19#include <net/mac80211.h>
20#include "ieee80211_i.h"
21#include "driver-ops.h"
22#include "wme.h"
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
/*
 * Build and transmit an ADDBA request action frame to @da for @tid.
 *
 * @dialog_token: token the peer echoes back in its ADDBA response
 * @start_seq_num: first sequence number the aggregation session will use
 * @agg_size: requested aggregation (reorder buffer) size
 * @timeout: block-ack timeout, copied verbatim into the frame
 */
static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 24 bytes: the fixed management frame header */
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/*
	 * BSSID depends on the interface role: our own address for
	 * AP/AP_VLAN/mesh, the AP's BSSID for a managed station and
	 * the IBSS BSSID for ad-hoc.
	 */
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	/* one byte action category + the fixed ADDBA request body */
	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	/*
	 * Block Ack Parameter Set layout (matches the >>2 / >>6 decode in
	 * ieee80211_process_addba_resp()):
	 */
	capab = (u16)(1 << 1);		/* bit 1: immediate BA policy */
	capab |= (u16)(tid << 2);	/* bits 5:2: TID */
	capab |= (u16)(agg_size << 6);	/* bits 15:6: buffer size */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	/* the SSN occupies bits 4-15 of the sequence-control field */
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb_tid(sdata, skb, tid);
}
112
113void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
114{
115 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
116 struct ieee80211_local *local = sdata->local;
117 struct sk_buff *skb;
118 struct ieee80211_bar *bar;
119 u16 bar_control = 0;
120
121 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
122 if (!skb)
123 return;
124
125 skb_reserve(skb, local->hw.extra_tx_headroom);
126 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
127 memset(bar, 0, sizeof(*bar));
128 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
129 IEEE80211_STYPE_BACK_REQ);
130 memcpy(bar->ra, ra, ETH_ALEN);
131 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
132 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
133 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
134 bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
135 bar->control = cpu_to_le16(bar_control);
136 bar->start_seq_num = cpu_to_le16(ssn);
137
138 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
139 IEEE80211_TX_CTL_REQ_TX_STATUS;
140 ieee80211_tx_skb_tid(sdata, skb, tid);
141}
142EXPORT_SYMBOL(ieee80211_send_bar);
143
/*
 * Publish (or, with a NULL @tid_tx, clear) the TX aggregation state for
 * @tid.  Readers dereference the pointer under RCU; writers must hold
 * both the AMPDU mutex and the sta spinlock, as asserted below.
 */
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}
151
/* Map a TID to its access category via the 802.1d priority table. */
static inline int ieee80211_ac_from_tid(int tid)
{
	return ieee802_1d_to_ac[tid & 7];
}
156
157
158
159
160
161
162
163
164
165
166
167
/*
 * Stop the hardware queue backing @tid's access category so the TX path
 * cannot race with aggregation state changes.  A per-queue counter allows
 * nesting across TIDs sharing one queue: only the transition 0 -> 1
 * actually stops the queue.  The __acquires/__acquire annotations model
 * the stop/wake pair as a lock for sparse.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	__acquire(agg_queue);
}
179
/*
 * Counterpart to ieee80211_stop_queue_agg(): drop one reference on the
 * per-queue stop counter and wake the queue again once the last TID
 * using it (transition 1 -> 0) is done.
 */
static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
		ieee80211_wake_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	__release(agg_queue);
}
191
192
193
194
195
/*
 * Move all frames buffered on @tid_tx->pending over to the local pending
 * queue so they get transmitted the normal (non-aggregated) way.  The
 * aggregation queue is stopped first; the caller must always follow up
 * with ieee80211_agg_splice_finish() to wake it again (that happens even
 * on the WARN early-return below).
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	struct ieee80211_local *local = sdata->local;
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
	unsigned long flags;

	ieee80211_stop_queue_agg(sdata, tid);

	if (WARN(!tid_tx,
		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
		 tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		/* local->pending is protected by the queue stop reason lock */
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}
219
/* Wake the queue stopped by ieee80211_agg_splice_packets(). */
static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
	ieee80211_wake_queue_agg(sdata, tid);
}
225
/*
 * Final teardown of a TX aggregation session: flush buffered frames,
 * unpublish the session state and free it after an RCU grace period.
 * Caller holds both the AMPDU mutex and the sta spinlock.
 */
static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Move any frames still buffered for this session onto the
	 * regular pending queue before the state goes away, so they
	 * are transmitted rather than dropped.
	 */
	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	ieee80211_assign_tid_tx(sta, tid, NULL);

	ieee80211_agg_splice_finish(sta->sdata, tid);

	/* RCU readers may still hold a reference; defer the free */
	kfree_rcu(tid_tx, rcu_head);
}
254
/*
 * Begin stopping a TX BA session for (@sta, @tid).  Maps @reason onto the
 * driver-visible stop action, handles the early states (already stopping,
 * never started) and otherwise marks the session STOPPING and notifies
 * the driver.  Returns 0, -EINVAL on a bad reason, -ENOENT if no session
 * exists, or -EALREADY if a stop is already in flight.
 * Caller must hold the AMPDU mutex (asserted below).
 */
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	enum ieee80211_ampdu_mlme_action action;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	/* select how the driver should stop: continue or flush frames */
	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * A stop is already in progress.  That is only acceptable again
	 * when the station is being destroyed; in that case tell the
	 * driver to flush-continue the pending stop.
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		ret = drv_ampdu_action(local, sta->sdata,
				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
				       &sta->sta, tid, NULL, 0);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! just tear the state down again */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	/* neither timer may fire again once the session is stopping */
	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this the TX path must not aggregate any more frames on
	 * this session.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * Wait for concurrent (softirq) TX that may still be looking at
	 * the old state bits before notifying the driver.
	 */
	synchronize_net();

	/* record who initiated the stop for the later delba decision */
	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, action,
			       &sta->sta, tid, NULL, 0);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case,
		 * but nothing else we can really do here.
		 */
	}

	return 0;
}
369
370
371
372
373
374
/*
 * Timer callback: no ADDBA response arrived in time after we sent an
 * ADDBA request, so tear the nascent session down again.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/*
	 * The timer's data points at sta->timer_to_tid[tid], a slot whose
	 * stored value is the tid itself.  Reading through the pointer
	 * yields the tid, and container_of() on the slot address recovers
	 * the owning sta_info - no separate context struct needed.
	 */
	u16 tid = *(u8 *)data;
	struct sta_info *sta = container_of((void *)data,
					    struct sta_info, timer_to_tid[tid]);
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		rcu_read_unlock();
		ht_dbg(sta->sdata,
		       "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
		       sta->sta.addr, tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}
404
/*
 * Actually start a TX BA session that ieee80211_start_tx_ba_session()
 * queued up: ask the driver, arm the response timer and send the ADDBA
 * request.  On driver refusal all state is rolled back and buffered
 * frames are spliced back for normal transmission.
 * NOTE(review): uses rcu_dereference_protected_tid_tx(), so this is
 * presumably called with the AMPDU mutex held (from the mlme worker) -
 * confirm against the caller.
 */
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Once WANT_START is cleared, ___ieee80211_stop_tx_ba_session()
	 * can no longer free the session on its "not even started yet"
	 * fast path - the session is now really being set up.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/*
	 * Make sure concurrent (softirq) TX observes the cleared bit
	 * before we hand the session to the driver.
	 */
	synchronize_net();

	/* tid_seq stores the sequence control; the SSN is bits 4-15 */
	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num, 0);
	if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		/* roll everything back, exactly like a session teardown */
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     local->hw.max_tx_aggregation_subframes,
				     tid_tx->timeout);
}
464
465
466
467
468
/*
 * Session inactivity timer: if no frame was sent on the session within
 * its negotiated timeout, stop it; otherwise re-arm the timer for the
 * remaining interval.
 */
static void sta_tx_agg_session_timer_expired(unsigned long data)
{
	/*
	 * data points at sta->timer_to_tid[tid], whose stored value is
	 * the tid itself; subtracting that value from the slot address
	 * yields &timer_to_tid[0], from which container_of() recovers
	 * the owning sta_info.
	 */
	u8 *ptid = (u8 *)data;
	u8 *timer_to_id = ptid - *ptid;
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					    timer_to_tid[0]);
	struct tid_ampdu_tx *tid_tx;
	unsigned long timeout;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		rcu_read_unlock();
		return;
	}

	/* recent TX activity? then just push the deadline out again */
	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
	       sta->sta.addr, (u16)*ptid);

	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
}
503
/*
 * Driver/rate-control entry point to request a new TX BA session for
 * (@pubsta, @tid) with inactivity @timeout (0 = no timeout).  Validates
 * hardware capabilities, interface type and retry throttling, allocates
 * the session state and defers the actual setup to the AMPDU worker.
 * Returns 0 on success or a negative error (-EINVAL, -EBUSY, -EAGAIN,
 * -ENOMEM).
 */
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	/* reject if HW can't aggregate, or sets sessions up itself */
	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * In IBSS we can't reliably know whether the peer supports
	 * aggregation, so only attempt it when the peer explicitly
	 * advertised HT support.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * After a burst of quick failures, back off for a grace period
	 * before trying again, rather than hammering the peer.
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;

	/*
	 * Timer data is the address of timer_to_tid[tid]; the callbacks
	 * recover both sta and tid from it (see the expiry functions).
	 */
	/* response timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* tx timer */
	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer_deferrable(&tid_tx->session_timer);

	/* assign a dialog token to identify the matching ADDBA response */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Publish the pending session only via tid_start_tx[]; the AMPDU
	 * worker queued below performs the real start.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
633
/*
 * Both the driver is ready and the peer has responded: tell the driver
 * the negotiated buffer size, mark the session OPERATIONAL and release
 * any frames that were buffered during setup.  Caller holds the AMPDU
 * mutex (asserted below).
 */
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata,
			 IEEE80211_AMPDU_TX_OPERATIONAL,
			 &sta->sta, tid, NULL, tid_tx->buf_size);

	/*
	 * Splice packets under the sta lock so the TX path can't queue
	 * new ones onto tid_tx->pending while we drain it, then flip the
	 * OPERATIONAL bit before waking the queue again.
	 */
	spin_lock_bh(&sta->lock);
	/* release the splicing queue - packets go to the normal path now */
	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);
}
667
/*
 * Driver callback (via the sdata work queue) reporting that it is ready
 * to aggregate on (@ra, @tid).  Sets DRV_READY; if the peer's ADDBA
 * response was already received, the session becomes operational.
 */
void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	/* sta_mtx keeps the station from going away under us */
	mutex_lock(&local->sta_mtx);
	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		mutex_unlock(&local->sta_mtx);
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (WARN_ON(!tid_tx)) {
		ht_dbg(sdata, "addBA was not requested!\n");
		goto unlock;
	}

	/* a second callback for the same session is a driver bug */
	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto unlock;

	/* peer already answered? then both halves are done - go live */
	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
	mutex_unlock(&sta->ampdu_mlme.mtx);
	mutex_unlock(&local->sta_mtx);
}
709
710void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
711 const u8 *ra, u16 tid)
712{
713 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
714 struct ieee80211_local *local = sdata->local;
715 struct ieee80211_ra_tid *ra_tid;
716 struct sk_buff *skb = dev_alloc_skb(0);
717
718 if (unlikely(!skb))
719 return;
720
721 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
722 memcpy(&ra_tid->ra, ra, ETH_ALEN);
723 ra_tid->tid = tid;
724
725 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
726 skb_queue_tail(&sdata->skb_queue, skb);
727 ieee80211_queue_work(&local->hw, &sdata->work);
728}
729EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
730
731int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
732 enum ieee80211_agg_stop_reason reason)
733{
734 int ret;
735
736 mutex_lock(&sta->ampdu_mlme.mtx);
737
738 ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);
739
740 mutex_unlock(&sta->ampdu_mlme.mtx);
741
742 return ret;
743}
744
745int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
746{
747 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
748 struct ieee80211_sub_if_data *sdata = sta->sdata;
749 struct ieee80211_local *local = sdata->local;
750 struct tid_ampdu_tx *tid_tx;
751 int ret = 0;
752
753 trace_api_stop_tx_ba_session(pubsta, tid);
754
755 if (!local->ops->ampdu_action)
756 return -EINVAL;
757
758 if (tid >= IEEE80211_NUM_TIDS)
759 return -EINVAL;
760
761 spin_lock_bh(&sta->lock);
762 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
763
764 if (!tid_tx) {
765 ret = -ENOENT;
766 goto unlock;
767 }
768
769 if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
770
771 ret = 0;
772 goto unlock;
773 }
774
775 set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
776 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
777
778 unlock:
779 spin_unlock_bh(&sta->lock);
780 return ret;
781}
782EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
783
/*
 * Driver callback (via the sdata work queue) confirming the session on
 * (@ra, @tid) has stopped.  Sends a delba if we initiated a local stop,
 * then removes the session state.
 */
void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);

	mutex_lock(&local->sta_mtx);

	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		goto unlock;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/* only valid while a STOPPING session exists for this tid */
	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sdata,
		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
		       sta->sta.addr, tid);
		goto unlock_sta;
	}

	/* we stopped it ourselves - tell the peer with a delba */
	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
		ieee80211_send_delba(sta->sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	ieee80211_remove_tid_tx(sta, tid);

 unlock_sta:
	spin_unlock_bh(&sta->lock);
	mutex_unlock(&sta->ampdu_mlme.mtx);
 unlock:
	mutex_unlock(&local->sta_mtx);
}
832
833void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
834 const u8 *ra, u16 tid)
835{
836 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
837 struct ieee80211_local *local = sdata->local;
838 struct ieee80211_ra_tid *ra_tid;
839 struct sk_buff *skb = dev_alloc_skb(0);
840
841 if (unlikely(!skb))
842 return;
843
844 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
845 memcpy(&ra_tid->ra, ra, ETH_ALEN);
846 ra_tid->tid = tid;
847
848 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
849 skb_queue_tail(&sdata->skb_queue, skb);
850 ieee80211_queue_work(&local->hw, &sdata->work);
851}
852EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
853
854
/*
 * Handle an incoming ADDBA response frame for a TX BA session we
 * requested.  On success (status SUCCESS and non-zero buffer size) the
 * session goes operational once the driver is also ready; any other
 * outcome tears the session down as declined.
 */
void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	u16 capab, tid;
	u8 buf_size;

	/* decode the Block Ack Parameter Set (see the encode side in
	 * ieee80211_send_addba_request()) */
	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	/* the token must match the one we sent in the request */
	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
		       sta->sta.addr, tid);
		goto out;
	}

	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
	       sta->sta.addr, tid);

	/*
	 * A (timer-triggered) stop may already have been requested or be
	 * in progress; in that case don't resurrect the session here.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for %pM tid %d but we already gave up\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/*
	 * A zero buffer size would make the session useless, so treat
	 * that like a refusal even with a SUCCESS status code.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;

		/* driver already signalled readiness? then go operational */
		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;

		/* arm the inactivity timer if a timeout was negotiated */
		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}
932