#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/if_vlan.h>

#include "ieee80211.h"
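/* SNAP OUIs used when re-encapsulating Ethernet frames: 802.1H (bridge
 * tunnel) for IPX/AppleTalk, RFC 1042 for everything else. */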
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

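/*
 * ieee80211_put_snap - write an 802.2 LLC/SNAP header at @data.
 *
 * IPX (0x8137) and AppleTalk AARP (0x80f3) use the 802.1H bridge-tunnel
 * OUI; everything else uses the RFC 1042 OUI.  The EtherType follows the
 * SNAP header.  Returns the number of bytes written.
 */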
static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
{
	struct ieee80211_snap_hdr *snap;
	u8 *oui;

	snap = (struct ieee80211_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}

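/*
 * ieee80211_encrypt_fragment - encrypt a single fragment in place using the
 * currently selected TX key.  Returns 0 on success, -1 if no crypto ops are
 * configured, TKIP countermeasures are active, or the cipher reports an
 * error.
 */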
int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
			       struct sk_buff *frag, int hdr_len)
{
	struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
	int res;

	if (!crypt || !crypt->ops) {
		printk(KERN_WARNING "%s: crypt or crypt->ops is NULL\n",
		       __func__);
		return -1;
	}

#ifdef CONFIG_IEEE80211_CRYPT_TKIP
	if (ieee->tkip_countermeasures &&
	    strcmp(crypt->ops->name, "TKIP") == 0) {
		struct ieee80211_hdr *header = (struct ieee80211_hdr *)frag->data;

		if (net_ratelimit())
			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
			       "TX packet to %pM\n",
			       ieee->dev->name, header->addr1);
		return -1;
	}
#endif

	/* Host-based encryption: MSDU-level first (e.g. the TKIP Michael
	 * MIC), then per-MPDU encryption. */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}

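/* Free the ieee80211_txb container structure (the fragment skbs themselves
 * are not freed here). */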
void ieee80211_txb_free(struct ieee80211_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}

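/*
 * ieee80211_alloc_txb - allocate a TX buffer with @nr_frags fragment skbs of
 * @txb_size bytes each.  Returns NULL if the container or any fragment
 * allocation fails; partially allocated fragments are released.
 */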
struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
					  int gfp_mask)
{
	struct ieee80211_txb *txb;
	int i;

	txb = kmalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct ieee80211_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}

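/*
 * ieee80211_classify - map the IPv4 TOS/DSCP precedence of an outgoing frame
 * to an 802.1d user priority (0-7).  Non-IP traffic gets priority 0.
 */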
static int
ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}

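/*
 * ieee80211_tx_query_agg_cap - decide whether this QoS data frame may be
 * sent as part of an A-MPDU aggregate, starting a Block Ack session on the
 * TS if one is not yet admitted, and honouring any forced A-MPDU mode
 * configured in pHTInfo.
 */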
#define SN_LESS(a, b)	((((a) - (b)) & 0x800) != 0)
void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee, struct sk_buff *skb, cb_desc *tcb_desc)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
	PTX_TS_RECORD pTxTs = NULL;
	struct ieee80211_hdr_1addr *hdr = (struct ieee80211_hdr_1addr *)skb->data;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;

	if (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr1))
		return;

#ifdef TO_DO_LIST
	if (pTcb->PacketLength >= 4096)
		return;

	if (!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
		return;
#endif

	/* DHCP/ARP frames are sent at a low basic rate (see
	 * ieee80211_rtl_xmit) and are never aggregated. */
	if (tcb_desc->bdhcp)
		return;

	if (!ieee->GetNmodeSupportBySecCfg(ieee))
		return;

	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true)) {
			printk(KERN_WARNING "%s: can't get TS\n", __func__);
			return;
		}
		if (!pTxTs->TxAdmittedBARecord.bValid) {
			/* No admitted Block Ack yet: start ADDBA unless the
			 * pairwise key is still being negotiated. */
			if (!(ieee->wpa_ie_len && ieee->pairwise_key_type == KEY_TYPE_NA))
				TsStartAddBaProcess(ieee, pTxTs);
			goto FORCED_AGG_SETTING;
		} else if (!pTxTs->bUsingBa) {
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
				    (pTxTs->TxCurSeq + 1) % 4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}

		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;

	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
}

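/*
 * ieee80211_qurey_ShortPreambleMode - use a short PLCP preamble when the
 * network advertises the short-preamble capability and the frame is not
 * being sent at 1 Mbps.
 */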
void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;

	/* Long preamble is mandatory at 1 Mbps (data_rate == 2 in 500 kbps units). */
	if (tcb_desc->data_rate == 2)
		return;

	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
}
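/*
 * ieee80211_query_HTCapShortGI - enable the short guard interval when HT is
 * active and either short GI is forced or the peer supports it for the
 * current channel width.
 */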
void ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (pHTInfo->bForcedShortGI) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}

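/*
 * ieee80211_query_BandwidthMode - transmit at 40 MHz when HT is active, the
 * frame is unicast, the selected rate is an MCS rate, and 40 MHz TX has not
 * been forced back to 20 MHz.
 */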
void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	/* Only HT MCS rates (bit 7 set) may be sent at 40 MHz. */
	if ((tcb_desc->data_rate & 0x80) == 0)
		return;

	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
}

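/*
 * ieee80211_query_protectionmode - choose RTS/CTS protection for a unicast
 * data frame: legacy RTS above the RTS threshold, RTS/CTS when the network
 * requests protection or the HT operating mode demands it, and RTS disabled
 * for A-MPDU aggregates.
 */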
void ieee80211_query_protectionmode(struct ieee80211_device *ieee, cb_desc *tcb_desc, struct sk_buff *skb)
{
	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;

	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data + 16))
		return;

	if (ieee->mode < IEEE_N_24G) {
		/* Legacy (non-HT) rates: plain RTS above the RTS threshold,
		 * or RTS/CTS if the network asks for protection. */
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	} else {
		PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

		while (true) {
			if (ieee->current_network.buseprotection) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}

			if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
				u8 HTOpMode = pHTInfo->CurrentOpMode;

				if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
				    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
					tcb_desc->rts_rate = MGN_24M;
					tcb_desc->bRTSEnable = true;
					break;
				}
			}

			if (skb->len > ieee->rts) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}

			if (tcb_desc->bAMPDUEnable) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = false;
				break;
			}

			if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}

			goto NO_PROTECTION;
		}
	}

	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}

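/*
 * ieee80211_txrate_selectmode - propagate the device-wide "disable rate
 * fallback" and "use driver-assigned rate" settings into the TX descriptor,
 * and select RATR index 0 for infrastructure/ad-hoc traffic that still uses
 * rate adaptation.
 */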
void ieee80211_txrate_selectmode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
#ifdef TO_DO_LIST
	if (!IsDataFrame(pFrame)) {
		pTcb->bTxDisableRateFallBack = TRUE;
		pTcb->bTxUseDriverAssingedRate = TRUE;
		pTcb->RATRIndex = 7;
		return;
	}

	if (pMgntInfo->ForcedDataRate != 0) {
		pTcb->bTxDisableRateFallBack = TRUE;
		pTcb->bTxUseDriverAssingedRate = TRUE;
		return;
	}
#endif
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate) {
		if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}

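/*
 * ieee80211_query_seqnum - advance the per-TS transmit sequence number for a
 * unicast QoS data frame (modulo 4096).
 */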
void ieee80211_query_seqnum(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *dst)
{
	if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst))
		return;
	if (IsQoSDataFrame(skb->data)) {
		PTX_TS_RECORD pTS = NULL;

		if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTS), dst, skb->priority, TX_DIR, true))
			return;
		pTS->TxCurSeq = (pTS->TxCurSeq + 1) % 4096;
	}
}

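/*
 * ieee80211_rtl_xmit - 802.3-to-802.11 transmit path.  Builds the 802.11
 * header, re-encapsulates the payload with an LLC/SNAP header, fragments
 * and (optionally) encrypts the frame into an ieee80211_txb, fills in the
 * per-packet cb_desc (rate, aggregation, protection, bandwidth), and hands
 * the result to the softmac queue or the driver's hard_start_xmit hook.
 */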
int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr_3addrqos header = {
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct ieee80211_crypt_data *crypt;
	bool bdhcp = false;
	cb_desc *tcb_desc;

	spin_lock_irqsave(&ieee->lock, flags);

	/* Nothing we can do without a TX handler; drop the frame. */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) ||
	    ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
					    eap_get_type(eap->type));
		}
#endif

		/* Detect DHCP (UDP ports 67/68) and ARP frames so they can be
		 * sent un-aggregated at a low rate and so leisure power save
		 * is delayed while the exchange completes. */
		if (skb->len > 282) {
			if (ETH_P_IP == ether_type) {
				/* skb->data still points at the 14-byte Ethernet header here */
				const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);

				if (IPPROTO_UDP == ip->protocol) {
					struct udphdr *udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));

					if (((((u8 *)udp)[1] == 68) && (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) {
						/* 68 : UDP BOOTP client, 67 : UDP BOOTP server */
						printk(KERN_DEBUG "DHCP pkt src port:%d, dest port:%d\n",
						       ((u8 *)udp)[1], ((u8 *)udp)[3]);
						bdhcp = true;
						ieee->LPSDelayCnt = 100;
					}
				}
			} else if (ETH_P_ARP == ether_type) {
				printk(KERN_DEBUG "TX ARP pkt, delay LPS\n");
				bdhcp = true;
				ieee->LPSDelayCnt = ieee->current_network.tim.tim_count;
			}
		}

		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);

		/* Strip the 802.3 header; it is replaced by the 802.11 header
		 * plus an LLC/SNAP header carrying the original EtherType. */
		skb_pull(skb, sizeof(struct ethhdr));

		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
		else
			fc = IEEE80211_FTYPE_DATA;

		if (qos_actived)
			fc |= IEEE80211_STYPE_QOS_DATA;
		else
			fc |= IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: addr1 = BSSID, addr2 = SA, addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* IBSS: addr1 = DA, addr2 = SA, addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Multicast and broadcast frames are never fragmented and are
		 * not ACKed. */
		if (is_multicast_ether_addr(header.addr1) ||
		    is_broadcast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_actived) {
			hdr_len = IEEE80211_3ADDR_LEN + 2;

			skb->priority = ieee80211_classify(skb, &ieee->current_network);
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}

		/* Payload bytes carried by each fragment: the fragment size
		 * minus the 802.11 header, optional FCS and crypto overhead. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* The last fragment carries the remainder. */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* Each fragment skb also needs room for the driver's TX headroom. */
		txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BK;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BK;
				tcb_desc->queue_index = WME_AC_BK;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, set the MOREFRAGS bit. */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;
			} else {
				/* The last fragment takes the remaining length. */
				bytes = bytes_last_frag;
			}

			if (qos_actived) {
				/* seq_ctrl[0] is used for non-QoS frames; each QoS AC
				 * uses its own counter at index UP2AC() + 1. */
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority) + 1] << 4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);
			}

			/* Put the SNAP header in the first fragment only. */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the offset so the rest of the packet goes into the next fragment. */
			skb_pull(skb, bytes);

			/* Encryption routines will move the header forward in
			 * order to insert the IV between the header and the
			 * payload. */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance the sequence number for the next MSDU (wraps at 0xFFF). */
		if (qos_actived) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		/* raw_tx: the skb already contains a complete 802.11 frame. */
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data, skb->len);
	}

 success:
	/* Fill in the per-packet TX descriptor stored in the first fragment's
	 * control buffer and run the rate/aggregation/protection queries. */
	if (txb) {
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);

		tcb_desc->bTxEnableFwCalcDur = 1;
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
		ieee80211_txrate_selectmode(ieee, tcb_desc);
		if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);

		if (bdhcp) {
			/* DHCP/ARP frames: lowest rate, no rate fallback, no aggregation. */
			tcb_desc->data_rate = MGN_1M;
			tcb_desc->bTxDisableRateFallBack = 1;
			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
			tcb_desc->bdhcp = 1;
		}

		ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
		ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		ieee80211_query_HTCapShortGI(ieee, tcb_desc);
		ieee80211_query_BandwidthMode(ieee, tcb_desc);
		ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
		ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			ieee80211_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, ieee) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			ieee80211_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;
}