1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/compiler.h>
35
36#include <linux/errno.h>
37#include <linux/if_arp.h>
38#include <linux/in6.h>
39#include <linux/in.h>
40#include <linux/ip.h>
41#include <linux/kernel.h>
42#include <linux/module.h>
43#include <linux/netdevice.h>
44#include <linux/pci.h>
45#include <linux/proc_fs.h>
46#include <linux/skbuff.h>
47#include <linux/slab.h>
48#include <linux/tcp.h>
49#include <linux/types.h>
50#include <linux/wireless.h>
51#include <linux/etherdevice.h>
52#include <asm/uaccess.h>
53#include <linux/if_vlan.h>
54
55#include "ieee80211.h"
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
/* 802.1H (bridge-tunnel) OUI, used when SNAP-encapsulating the two
 * ethertypes (IPX, AppleTalk AARP) that must not use RFC 1042. */
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
/* RFC 1042 OUI, used for SNAP encapsulation of all other ethertypes. */
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
157
158static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
159{
160 struct ieee80211_snap_hdr *snap;
161 u8 *oui;
162
163 snap = (struct ieee80211_snap_hdr *)data;
164 snap->dsap = 0xaa;
165 snap->ssap = 0xaa;
166 snap->ctrl = 0x03;
167
168 if (h_proto == 0x8137 || h_proto == 0x80f3)
169 oui = P802_1H_OUI;
170 else
171 oui = RFC1042_OUI;
172 snap->oui[0] = oui[0];
173 snap->oui[1] = oui[1];
174 snap->oui[2] = oui[2];
175
176 *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
177
178 return SNAP_SIZE + sizeof(u16);
179}
180
181int ieee80211_encrypt_fragment(
182 struct ieee80211_device *ieee,
183 struct sk_buff *frag,
184 int hdr_len)
185{
186 struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
187 int res;
188
189 if (!(crypt && crypt->ops))
190 {
191 printk("=========>%s(), crypt is null\n", __FUNCTION__);
192 return -1;
193 }
194#ifdef CONFIG_IEEE80211_CRYPT_TKIP
195 struct ieee80211_hdr *header;
196
197 if (ieee->tkip_countermeasures &&
198 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
199 header = (struct ieee80211_hdr *) frag->data;
200 if (net_ratelimit()) {
201 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
202 "TX packet to %pM\n",
203 ieee->dev->name, header->addr1);
204 }
205 return -1;
206 }
207#endif
208
209
210
211
212
213
214 atomic_inc(&crypt->refcnt);
215 res = 0;
216 if (crypt->ops->encrypt_msdu)
217 res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
218 if (res == 0 && crypt->ops->encrypt_mpdu)
219 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
220
221 atomic_dec(&crypt->refcnt);
222 if (res < 0) {
223 printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
224 ieee->dev->name, frag->len);
225 ieee->ieee_stats.tx_discards++;
226 return -1;
227 }
228
229 return 0;
230}
231
232
/*
 * Free a txb control structure.
 *
 * Only the txb itself is released here; the fragment skbs in
 * txb->fragments[] are NOT freed.  NOTE(review): presumably the TX path
 * consumes/frees the fragments before this is called -- confirm every
 * caller disposes of the fragments, otherwise they leak.
 */
void ieee80211_txb_free(struct ieee80211_txb *txb) {

	if (unlikely(!txb))
		return;
	kfree(txb);
}
239
240struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
241 int gfp_mask)
242{
243 struct ieee80211_txb *txb;
244 int i;
245 txb = kmalloc(
246 sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
247 gfp_mask);
248 if (!txb)
249 return NULL;
250
251 memset(txb, 0, sizeof(struct ieee80211_txb));
252 txb->nr_frags = nr_frags;
253 txb->frag_size = txb_size;
254
255 for (i = 0; i < nr_frags; i++) {
256 txb->fragments[i] = dev_alloc_skb(txb_size);
257 if (unlikely(!txb->fragments[i])) {
258 i--;
259 break;
260 }
261 memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
262 }
263 if (unlikely(i != nr_frags)) {
264 while (i >= 0)
265 dev_kfree_skb_any(txb->fragments[i--]);
266 kfree(txb);
267 return NULL;
268 }
269 return txb;
270}
271
272
273
274static int
275ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
276{
277 struct ethhdr *eth;
278 struct iphdr *ip;
279 eth = (struct ethhdr *)skb->data;
280 if (eth->h_proto != htons(ETH_P_IP))
281 return 0;
282
283
284 ip = ip_hdr(skb);
285 switch (ip->tos & 0xfc) {
286 case 0x20:
287 return 2;
288 case 0x40:
289 return 1;
290 case 0x60:
291 return 3;
292 case 0x80:
293 return 4;
294 case 0xa0:
295 return 5;
296 case 0xc0:
297 return 6;
298 case 0xe0:
299 return 7;
300 default:
301 return 0;
302 }
303}
304
/* True when 12-bit sequence number a is "less than" b (sign bit of the
 * mod-4096 difference). */
#define SN_LESS(a, b) (((a-b)&0x800)!=0)
/*
 * Decide whether this frame may be sent as part of an A-MPDU aggregate
 * and fill in the aggregation fields of @tcb_desc.
 *
 * Only unicast QoS data frames on an active HT link are considered.
 * If no BA session is admitted yet for the traffic stream, one is
 * started (TsStartAddBaProcess) and the frame itself is not aggregated.
 * Regardless of the per-TS outcome, a forced A-MPDU mode
 * (pHTInfo->ForcedAMPDUMode) can still override the result at the end.
 */
void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee, struct sk_buff *skb, cb_desc *tcb_desc)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
	PTX_TS_RECORD pTxTs = NULL;
	struct ieee80211_hdr_1addr *hdr = (struct ieee80211_hdr_1addr *)skb->data;

	/* Aggregation requires an active HT connection. */
	if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
		return;
	/* Only QoS data frames can be aggregated. */
	if (!IsQoSDataFrame(skb->data))
		return;

	/* Group-addressed frames are never aggregated. */
	if (is_multicast_ether_addr(hdr->addr1))
		return;

#ifdef TO_DO_LIST
	if(pTcb->PacketLength >= 4096)
		return;

	if(!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
		return;
#endif
	/* The security configuration may forbid N-mode (and thus A-MPDU). */
	if(!ieee->GetNmodeSupportBySecCfg(ieee->dev))
	{
		return;
	}
	if(pHTInfo->bCurrentAMPDUEnable)
	{
		if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
		{
			printk("===>can't get TS\n");
			return;
		}
		if (pTxTs->TxAdmittedBARecord.bValid == false)
		{
			/* No admitted BA session yet: kick off ADDBA and fall
			 * through to the forced-mode override below. */
			TsStartAddBaProcess(ieee, pTxTs);
			goto FORCED_AGG_SETTING;
		}
		else if (pTxTs->bUsingBa == false)
		{
			/* BA admitted but not in use yet: start using it once
			 * the next sequence number is past the BA start. */
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}

		if (ieee->iw_mode == IW_MODE_INFRA)
		{
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	/* A forced mode (debug/IOT knob) overrides whatever was decided. */
	switch (pHTInfo->ForcedAMPDUMode )
	{
		case HT_AGG_AUTO:
			break;

		case HT_AGG_FORCE_ENABLE:
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
			tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
			break;

		case HT_AGG_FORCE_DISABLE:
			tcb_desc->bAMPDUEnable = false;
			tcb_desc->ampdu_density = 0;
			tcb_desc->ampdu_factor = 0;
			break;

	}
	return;
}
379
380extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
381{
382 tcb_desc->bUseShortPreamble = false;
383 if (tcb_desc->data_rate == 2)
384 {
385 return;
386 }
387 else if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
388 {
389 tcb_desc->bUseShortPreamble = true;
390 }
391 return;
392}
393extern void
394ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
395{
396 PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
397
398 tcb_desc->bUseShortGI = false;
399
400 if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
401 return;
402
403 if(pHTInfo->bForcedShortGI)
404 {
405 tcb_desc->bUseShortGI = true;
406 return;
407 }
408
409 if((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz)
410 tcb_desc->bUseShortGI = true;
411 else if((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz)
412 tcb_desc->bUseShortGI = true;
413}
414
415void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
416{
417 PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
418
419 tcb_desc->bPacketBW = false;
420
421 if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
422 return;
423
424 if(tcb_desc->bMulticast || tcb_desc->bBroadcast)
425 return;
426
427 if((tcb_desc->data_rate & 0x80)==0)
428 return;
429
430 if(pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
431 tcb_desc->bPacketBW = true;
432 return;
433}
434
435void ieee80211_query_protectionmode(struct ieee80211_device *ieee, cb_desc *tcb_desc, struct sk_buff *skb)
436{
437
438 tcb_desc->bRTSSTBC = false;
439 tcb_desc->bRTSUseShortGI = false;
440 tcb_desc->bCTSEnable = false;
441 tcb_desc->RTSSC = 0;
442 tcb_desc->bRTSBW = false;
443
444 if(tcb_desc->bBroadcast || tcb_desc->bMulticast)
445 return;
446
447 if (is_broadcast_ether_addr(skb->data+16))
448 return;
449
450 if (ieee->mode < IEEE_N_24G)
451 {
452
453
454
455
456 if (skb->len > ieee->rts)
457 {
458 tcb_desc->bRTSEnable = true;
459 tcb_desc->rts_rate = MGN_24M;
460 }
461 else if (ieee->current_network.buseprotection)
462 {
463
464 tcb_desc->bRTSEnable = true;
465 tcb_desc->bCTSEnable = true;
466 tcb_desc->rts_rate = MGN_24M;
467 }
468
469 return;
470 }
471 else
472 {
473 PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
474 while (true)
475 {
476
477 if (ieee->current_network.buseprotection)
478 {
479 tcb_desc->bRTSEnable = true;
480 tcb_desc->bCTSEnable = true;
481 tcb_desc->rts_rate = MGN_24M;
482 break;
483 }
484
485 if(pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT)
486 {
487 u8 HTOpMode = pHTInfo->CurrentOpMode;
488 if((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
489 (!pHTInfo->bCurBW40MHz && HTOpMode == 3) )
490 {
491 tcb_desc->rts_rate = MGN_24M;
492 tcb_desc->bRTSEnable = true;
493 break;
494 }
495 }
496
497 if (skb->len > ieee->rts)
498 {
499 tcb_desc->rts_rate = MGN_24M;
500 tcb_desc->bRTSEnable = true;
501 break;
502 }
503
504
505 if(tcb_desc->bAMPDUEnable)
506 {
507 tcb_desc->rts_rate = MGN_24M;
508
509
510 tcb_desc->bRTSEnable = false;
511 break;
512 }
513
514 if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF)
515 {
516 tcb_desc->bCTSEnable = true;
517 tcb_desc->rts_rate = MGN_24M;
518 tcb_desc->bRTSEnable = true;
519 break;
520 }
521
522 goto NO_PROTECTION;
523 }
524 }
525
526 if( 0 )
527 {
528 tcb_desc->bCTSEnable = true;
529 tcb_desc->rts_rate = MGN_24M;
530 tcb_desc->bRTSEnable = true;
531 }
532 if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
533 tcb_desc->bUseShortPreamble = true;
534 if (ieee->mode == IW_MODE_MASTER)
535 goto NO_PROTECTION;
536 return;
537NO_PROTECTION:
538 tcb_desc->bRTSEnable = false;
539 tcb_desc->bCTSEnable = false;
540 tcb_desc->rts_rate = 0;
541 tcb_desc->RTSSC = 0;
542 tcb_desc->bRTSBW = false;
543}
544
545
546void ieee80211_txrate_selectmode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
547{
548#ifdef TO_DO_LIST
549 if(!IsDataFrame(pFrame))
550 {
551 pTcb->bTxDisableRateFallBack = TRUE;
552 pTcb->bTxUseDriverAssingedRate = TRUE;
553 pTcb->RATRIndex = 7;
554 return;
555 }
556
557 if(pMgntInfo->ForcedDataRate!= 0)
558 {
559 pTcb->bTxDisableRateFallBack = TRUE;
560 pTcb->bTxUseDriverAssingedRate = TRUE;
561 return;
562 }
563#endif
564 if(ieee->bTxDisableRateFallBack)
565 tcb_desc->bTxDisableRateFallBack = true;
566
567 if(ieee->bTxUseDriverAssingedRate)
568 tcb_desc->bTxUseDriverAssingedRate = true;
569 if(!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate)
570 {
571 if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
572 tcb_desc->RATRIndex = 0;
573 }
574}
575
576void ieee80211_query_seqnum(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *dst)
577{
578 if (is_multicast_ether_addr(dst))
579 return;
580 if (IsQoSDataFrame(skb->data))
581 {
582 PTX_TS_RECORD pTS = NULL;
583 if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTS), dst, skb->priority, TX_DIR, true))
584 {
585 return;
586 }
587 pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
588 }
589}
590
/*
 * ieee80211_xmit - softmac TX entry point for an outgoing ethernet frame.
 *
 * Converts the skb from ethernet to 802.11 framing (SNAP encapsulation,
 * optional QoS header, fragmentation, optional encryption), builds a txb
 * holding one skb per fragment, then hands the txb either to the softmac
 * TX queue or directly to the driver's hard_start_xmit.  In raw-tx mode
 * the payload is copied into a single fragment unmodified.
 *
 * Returns 0 when the frame was consumed (transmitted or dropped) and 1
 * only when txb allocation failed, after stopping the queue so the stack
 * retries later.
 */
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr_3addrqos header = { /* ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;

	struct ieee80211_crypt_data *crypt;

	cb_desc *tcb_desc;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no handler able to take the txb (neither a driver
	 * hard_start_xmit nor the softmac queue path), drop the frame. */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
		((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	if(likely(ieee->raw_tx == 0)){
		/* Normal path: ethernet frame that needs 802.11 framing. */
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		/* Encrypt unless this is an EAPOL frame during 802.1X
		 * authentication; host encryption must be enabled and a
		 * crypto context configured. */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		/* 802.1X says non-EAPOL frames must be dropped while the
		 * port is unauthorized and drop_unencrypted is set. */
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		/* Save source and destination MAC addresses before the
		 * ethernet header is stripped. */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		skb_pull(skb, sizeof(struct ethhdr));

		/* Payload size once the SNAP header + ethertype are added. */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
		else

			fc = IEEE80211_FTYPE_DATA;

		if(qos_actived)
			fc |= IEEE80211_STYPE_QOS_DATA;
		else
			fc |= IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;

			/* To-DS: addr1 = BSSID, addr2 = SA, addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {

			/* IBSS: addr1 = DA, addr2 = SA, addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Group-addressed frames are never fragmented (and never
		 * acked); unicast uses the fragmentation threshold. */
		if (is_multicast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		/* QoS frames carry a 2-byte QoS control field. */
		if(qos_actived)
		{
			hdr_len = IEEE80211_3ADDR_LEN + 2;

			skb->priority = ieee80211_classify(skb, &ieee->current_network);
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}

		/* Determine the fragment payload size: frame size minus
		 * header, minus FCS if we compute/reserve it, minus the
		 * cipher's per-fragment overhead when encrypting. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* Number of fragments and the size of the last one. */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* Each fragment gets headroom for the driver's own header. */
		txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		/* QoS frames go to the AC queue for their priority. */
		if(qos_actived)
		{
			txb->queue_index = UP2AC(skb->priority);
		} else {
			txb->queue_index = WME_AC_BK;
		}

		/* Build each fragment: header, SNAP (first fragment only),
		 * payload slice, then optional encryption. */
		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
			if(qos_actived){
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BK;
				tcb_desc->queue_index = WME_AC_BK;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				/* Leave room for the cipher's IV prefix. */
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* All but the last fragment set the More-Fragments
			 * bit and carry a full payload. */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {

				bytes = bytes_last_frag;
			}

			if(qos_actived)
			{
				/* Per-AC sequence counter; fragment number
				 * goes in the low 4 bits. */
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			/* SNAP header only precedes the first fragment. */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the source skb past the copied slice. */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload. */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance the 12-bit sequence counter used above. */
		if(qos_actived)
		{
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	}else{
		/* Raw-tx path: caller supplies a ready 802.11 frame. */
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
	/* Fill the first fragment's control descriptor with rate,
	 * preamble, aggregation, bandwidth and protection settings. */
	if (txb)
	{
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
		ieee80211_txrate_selectmode(ieee, tcb_desc);
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else

			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
		ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
		ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		ieee80211_query_HTCapShortGI(ieee, tcb_desc);
		ieee80211_query_BandwidthMode(ieee, tcb_desc);
		ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
		ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);


	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			/* Driver refused the txb: free it (frame dropped). */
			ieee80211_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
909
910EXPORT_SYMBOL(ieee80211_txb_free);
911