1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/compiler.h>
35#include <linux/errno.h>
36#include <linux/if_arp.h>
37#include <linux/in6.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <linux/kernel.h>
41#include <linux/module.h>
42#include <linux/netdevice.h>
43#include <linux/pci.h>
44#include <linux/proc_fs.h>
45#include <linux/skbuff.h>
46#include <linux/slab.h>
47#include <linux/tcp.h>
48#include <linux/types.h>
49#include <linux/version.h>
50#include <linux/wireless.h>
51#include <linux/etherdevice.h>
52#include <asm/uaccess.h>
53#include <linux/if_vlan.h>
54
55#include "ieee80211.h"
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
156static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
157
158static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
159{
160 struct ieee80211_snap_hdr *snap;
161 u8 *oui;
162
163 snap = (struct ieee80211_snap_hdr *)data;
164 snap->dsap = 0xaa;
165 snap->ssap = 0xaa;
166 snap->ctrl = 0x03;
167
168 if (h_proto == 0x8137 || h_proto == 0x80f3)
169 oui = P802_1H_OUI;
170 else
171 oui = RFC1042_OUI;
172 snap->oui[0] = oui[0];
173 snap->oui[1] = oui[1];
174 snap->oui[2] = oui[2];
175
176 *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
177
178 return SNAP_SIZE + sizeof(u16);
179}
180
181int ieee80211_encrypt_fragment(
182 struct ieee80211_device *ieee,
183 struct sk_buff *frag,
184 int hdr_len)
185{
186 struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
187 int res;
188
189 if (!(crypt && crypt->ops))
190 {
191 printk("=========>%s(), crypt is null\n", __FUNCTION__);
192 return -1;
193 }
194#ifdef CONFIG_IEEE80211_CRYPT_TKIP
195 struct ieee80211_hdr *header;
196
197 if (ieee->tkip_countermeasures &&
198 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
199 header = (struct ieee80211_hdr *) frag->data;
200 if (net_ratelimit()) {
201 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
202 "TX packet to %pM\n",
203 ieee->dev->name, header->addr1);
204 }
205 return -1;
206 }
207#endif
208
209
210
211
212
213
214 atomic_inc(&crypt->refcnt);
215 res = 0;
216 if (crypt->ops->encrypt_msdu)
217 res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
218 if (res == 0 && crypt->ops->encrypt_mpdu)
219 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
220
221 atomic_dec(&crypt->refcnt);
222 if (res < 0) {
223 printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
224 ieee->dev->name, frag->len);
225 ieee->ieee_stats.tx_discards++;
226 return -1;
227 }
228
229 return 0;
230}
231
232
/*
 * Release a TX buffer descriptor.  NULL is tolerated.
 * Note: only the txb container itself is freed here; ownership of the
 * fragment skbs is handed to the driver xmit path, which frees them.
 */
void ieee80211_txb_free(struct ieee80211_txb *txb) {
	if (likely(txb))
		kfree(txb);
}
238
239struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
240 int gfp_mask)
241{
242 struct ieee80211_txb *txb;
243 int i;
244 txb = kmalloc(
245 sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags),
246 gfp_mask);
247 if (!txb)
248 return NULL;
249
250 memset(txb, 0, sizeof(struct ieee80211_txb));
251 txb->nr_frags = nr_frags;
252 txb->frag_size = txb_size;
253
254 for (i = 0; i < nr_frags; i++) {
255 txb->fragments[i] = dev_alloc_skb(txb_size);
256 if (unlikely(!txb->fragments[i])) {
257 i--;
258 break;
259 }
260 memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
261 }
262 if (unlikely(i != nr_frags)) {
263 while (i >= 0)
264 dev_kfree_skb_any(txb->fragments[i--]);
265 kfree(txb);
266 return NULL;
267 }
268 return txb;
269}
270
271
272
273static int
274ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
275{
276 struct ethhdr *eth;
277 struct iphdr *ip;
278 eth = (struct ethhdr *)skb->data;
279 if (eth->h_proto != htons(ETH_P_IP))
280 return 0;
281
282 ip = ip_hdr(skb);
283 switch (ip->tos & 0xfc) {
284 case 0x20:
285 return 2;
286 case 0x40:
287 return 1;
288 case 0x60:
289 return 3;
290 case 0x80:
291 return 4;
292 case 0xa0:
293 return 5;
294 case 0xc0:
295 return 6;
296 case 0xe0:
297 return 7;
298 default:
299 return 0;
300 }
301}
302
303#define SN_LESS(a, b) (((a-b)&0x800)!=0)
/*
 * Decide whether this QoS data frame may be sent as part of an A-MPDU
 * aggregate, and fill the aggregation fields of @tcb_desc accordingly.
 * Kicks off ADDBA negotiation for the frame's TID when no block-ack
 * session exists yet.  Forced AMPDU settings (ForcedAMPDUMode) are
 * applied last and override the negotiated result.
 */
void ieee80211_tx_query_agg_cap(struct ieee80211_device* ieee, struct sk_buff* skb, cb_desc* tcb_desc)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
	PTX_TS_RECORD pTxTs = NULL;
	struct ieee80211_hdr_1addr* hdr = (struct ieee80211_hdr_1addr*)skb->data;

	/* Aggregation requires HT; only unicast QoS data frames qualify. */
	if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;

	if (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr1))
		return;

#ifdef TO_DO_LIST
	if(pTcb->PacketLength >= 4096)
		return;

	if(!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
		return;
#endif

	/* DHCP frames were tagged earlier; keep them out of aggregates so
	 * they go out promptly at a robust rate. */
	if(tcb_desc->bdhcp)
	{
		return;
	}


#if 1
	/* Current security configuration may forbid N-mode (e.g. WEP/TKIP
	 * pairwise ciphers) — no aggregation in that case. */
	if (!ieee->GetNmodeSupportBySecCfg(ieee))
	{
		return;
	}
#endif
	if(pHTInfo->bCurrentAMPDUEnable)
	{
		/* Look up (or create) the TX traffic stream for this TID. */
		if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
		{
			printk("===>can't get TS\n");
			return;
		}
		if (pTxTs->TxAdmittedBARecord.bValid == false)
		{
			/* No admitted BA session yet.  Defer ADDBA while the
			 * pairwise key is still unset during WPA handshake;
			 * otherwise start the ADDBA exchange now. */
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type == KEY_TYPE_NA))
				;
			else
				TsStartAddBaProcess(ieee, pTxTs);
			goto FORCED_AGG_SETTING;
		}
		else if (pTxTs->bUsingBa == false)
		{
			/* BA admitted but not yet in use: switch it on once the
			 * next sequence number falls inside the BA window. */
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}

		/* Session is live: enable A-MPDU with the negotiated limits. */
		if (ieee->iw_mode == IW_MODE_INFRA)
		{
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	/* Driver-forced override of whatever was decided above. */
	switch(pHTInfo->ForcedAMPDUMode )
	{
		case HT_AGG_AUTO:
			break;

		case HT_AGG_FORCE_ENABLE:
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
			tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
			break;

		case HT_AGG_FORCE_DISABLE:
			tcb_desc->bAMPDUEnable = false;
			tcb_desc->ampdu_density = 0;
			tcb_desc->ampdu_factor = 0;
			break;

	}
	return;
}
390
391extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
392{
393 tcb_desc->bUseShortPreamble = false;
394 if (tcb_desc->data_rate == 2)
395 {
396 return;
397 }
398 else if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
399 {
400 tcb_desc->bUseShortPreamble = true;
401 }
402 return;
403}
404extern void
405ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
406{
407 PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
408
409 tcb_desc->bUseShortGI = false;
410
411 if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
412 return;
413
414 if(pHTInfo->bForcedShortGI)
415 {
416 tcb_desc->bUseShortGI = true;
417 return;
418 }
419
420 if((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz)
421 tcb_desc->bUseShortGI = true;
422 else if((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz)
423 tcb_desc->bUseShortGI = true;
424}
425
426void ieee80211_query_BandwidthMode(struct ieee80211_device* ieee, cb_desc *tcb_desc)
427{
428 PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
429
430 tcb_desc->bPacketBW = false;
431
432 if(!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
433 return;
434
435 if(tcb_desc->bMulticast || tcb_desc->bBroadcast)
436 return;
437
438 if((tcb_desc->data_rate & 0x80)==0)
439 return;
440
441 if(pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
442 tcb_desc->bPacketBW = true;
443 return;
444}
445
/*
 * Choose the RTS/CTS protection scheme for this packet and fill the
 * RTS-related fields of @tcb_desc.  Legacy (pre-N) modes use plain
 * RTS thresholding; HT modes walk a priority ladder (ERP protection,
 * HT opmode protection, RTS threshold, A-MPDU, forced CTS-to-self)
 * implemented as a single-pass while(true)/break ladder.
 */
void ieee80211_query_protectionmode(struct ieee80211_device* ieee, cb_desc* tcb_desc, struct sk_buff* skb)
{

	/* Reset all protection fields to "no protection". */
	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;

	/* Group-addressed frames are never protected. */
	if(tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	/* Offset 16 is the 802.11 addr3 field (DA in infra mode) —
	 * skip protection for frames ultimately destined to broadcast. */
	if (is_broadcast_ether_addr(skb->data+16))
		return;

	if (ieee->mode < IEEE_N_24G)
	{
		/* Legacy 802.11a/b/g: plain RTS above the RTS threshold,
		 * or RTS/CTS when the AP signalled ERP protection. */
		if (skb->len > ieee->rts)
		{
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		else if (ieee->current_network.buseprotection)
		{
			/* ERP protection requested by the network. */
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}

		return;
	}
	else
	{
		PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
		/* One-shot ladder: each matching rule configures protection
		 * and breaks out; falling off the end means no protection. */
		while (true)
		{
			/* 1) Network-level ERP protection. */
			if (ieee->current_network.buseprotection)
			{
				tcb_desc->bRTSEnable = true;
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}

			/* 2) HT operation-mode protection (opmode 2 = 20MHz-only
			 * stations present on a 40MHz BSS, opmode 3 = non-HT
			 * stations present). */
			if(pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT)
			{
				u8 HTOpMode = pHTInfo->CurrentOpMode;
				if((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
				   (!pHTInfo->bCurBW40MHz && HTOpMode == 3) )
				{
					tcb_desc->rts_rate = MGN_24M;
					tcb_desc->bRTSEnable = true;
					break;
				}
			}

			/* 3) RTS threshold. */
			if (skb->len > ieee->rts)
			{
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}

			/* 4) Aggregated frames: reserve the RTS rate but leave
			 * RTS itself off (the aggregate is protected elsewhere). */
			if(tcb_desc->bAMPDUEnable)
			{
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = false;
				break;
			}

			/* 5) IOT workaround: peer requires CTS-to-self. */
			if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF)
			{
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}

			/* No rule matched: explicitly clear everything. */
			goto NO_PROTECTION;
		}
	}

	/* NOTE(review): dead code — condition is constant 0; looks like a
	 * disabled CTS-to-self experiment left in place. */
	if( 0 )
	{
		tcb_desc->bCTSEnable = true;
		tcb_desc->rts_rate = MGN_24M;
		tcb_desc->bRTSEnable = true;
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	/* NOTE(review): compares the PHY mode field against an iw_mode
	 * constant (IW_MODE_MASTER) — likely intended ieee->iw_mode; confirm
	 * before changing, behavior kept as-is. */
	if (ieee->mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}
555
556
557void ieee80211_txrate_selectmode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
558{
559#ifdef TO_DO_LIST
560 if(!IsDataFrame(pFrame))
561 {
562 pTcb->bTxDisableRateFallBack = TRUE;
563 pTcb->bTxUseDriverAssingedRate = TRUE;
564 pTcb->RATRIndex = 7;
565 return;
566 }
567
568 if(pMgntInfo->ForcedDataRate!= 0)
569 {
570 pTcb->bTxDisableRateFallBack = TRUE;
571 pTcb->bTxUseDriverAssingedRate = TRUE;
572 return;
573 }
574#endif
575 if(ieee->bTxDisableRateFallBack)
576 tcb_desc->bTxDisableRateFallBack = true;
577
578 if(ieee->bTxUseDriverAssingedRate)
579 tcb_desc->bTxUseDriverAssingedRate = true;
580 if(!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate)
581 {
582 if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
583 tcb_desc->RATRIndex = 0;
584 }
585}
586
587void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u8* dst)
588{
589 if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst))
590 return;
591 if (IsQoSDataFrame(skb->data))
592 {
593 PTX_TS_RECORD pTS = NULL;
594 if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTS), dst, skb->priority, TX_DIR, true))
595 {
596 return;
597 }
598 pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
599 }
600}
601
/*
 * net_device hard_start_xmit entry point: turn an Ethernet frame into a
 * chain of 802.11 data fragments (a struct ieee80211_txb), encrypt them
 * if required, fill per-packet driver hints (rate, aggregation,
 * protection, bandwidth, preamble), and hand the txb to the softmac
 * queue or the driver's hard_start_xmit.
 *
 * Returns 0 on success (skb consumed), 1 on allocation failure (queue
 * stopped, skb NOT freed so the stack can retry).
 */
int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	/* Template 802.11 header copied into every fragment. */
	struct ieee80211_hdr_3addrqos header = {
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;

	struct ieee80211_crypt_data* crypt;
	bool bdhcp =false;

	cb_desc *tcb_desc;

	spin_lock_irqsave(&ieee->lock, flags);

	/* Bail out early if no xmit handler matches the configured
	 * softmac mode (queued vs direct). */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	    ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	/* Normal path: skb is an Ethernet frame to be 802.11-encapsulated. */
	if(likely(ieee->raw_tx == 0)){
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		/* EAPOL frames bypass encryption during 802.1X handshakes. */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		/* drop_unencrypted: refuse plaintext data while 802.1X is
		 * still authorizing (except the EAPOL frames themselves). */
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		/* Sniff DHCP (UDP ports 67/68) and ARP so they can be sent
		 * at a robust rate and delay power-save entry (LPSDelayCnt).
		 * The 282-byte floor filters out frames too short to be DHCP. */
		if (skb->len > 282){
			if (ETH_P_IP == ether_type) {
				/* 14 = Ethernet header length. */
				const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
				if (IPPROTO_UDP == ip->protocol) {
					struct udphdr *udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
					/* Byte 1/3 = low bytes of the big-endian
					 * source/dest UDP ports. */
					if(((((u8 *)udp)[1] == 68) && (((u8 *)udp)[3] == 67)) ||
					   ((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) {

						printk("DHCP pkt src port:%d, dest port:%d!!\n", ((u8 *)udp)[1],((u8 *)udp)[3]);

						bdhcp = true;
#ifdef _RTL8192_EXT_PATCH_
						ieee->LPSDelayCnt = 100;
#else
						ieee->LPSDelayCnt = 100;
#endif
					}
				}
			}else if(ETH_P_ARP == ether_type){
				printk("=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt = ieee->current_network.tim.tim_count;

			}
		}

		/* Save Ethernet DA/SA, then strip the Ethernet header. */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		skb_pull(skb, sizeof(struct ethhdr));

		/* Payload size including the SNAP header we will prepend. */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
		else

			fc = IEEE80211_FTYPE_DATA;

		if(qos_actived)
			fc |= IEEE80211_STYPE_QOS_DATA;
		else
			fc |= IEEE80211_STYPE_DATA;

		/* Fill the address fields per operating mode. */
		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To-DS: addr1 = BSSID, addr2 = SA, addr3 = DA. */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* IBSS: addr1 = DA, addr2 = SA, addr3 = BSSID. */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Group-addressed frames are never fragmented and are not
		 * acknowledged; unicast uses the fragmentation threshold. */
		if (is_multicast_ether_addr(header.addr1) ||
		    is_broadcast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if(qos_actived)
		{
			/* QoS data header carries a 2-byte QoS control field. */
			hdr_len = IEEE80211_3ADDR_LEN + 2;

			skb->priority = ieee80211_classify(skb, &ieee->current_network);
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}

		/* Work out how many fragments are needed, accounting for the
		 * header, optional FCS space and crypto overhead per frag. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* Each fragment gets tx_headroom extra for the driver's own
		 * descriptor prepend. */
		txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		if(qos_actived)
		{
			txb->queue_index = UP2AC(skb->priority);
		} else {
			txb->queue_index = WME_AC_BK;
		}

		/* Build each fragment: header, SNAP (first frag only),
		 * payload slice, then optional encryption and FCS space. */
		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
			if(qos_actived){
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BK;
				tcb_desc->queue_index = WME_AC_BK;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				/* Leave room for the cipher's IV/header. */
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* All but the last fragment carry MOREFRAGS and a full
			 * payload; the last carries the remainder. */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {

				bytes = bytes_last_frag;
			}

			/* seq_ctl = 12-bit sequence number << 4 | fragment number.
			 * QoS uses a per-AC counter (index UP2AC+1), non-QoS
			 * uses counter 0. */
			if(qos_actived)
			{
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			/* Only the first fragment gets the LLC/SNAP header. */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Consume this slice from the source skb. */
			skb_pull(skb, bytes);

			/* Encryption in place grows the fragment by the cipher
			 * prefix/postfix already budgeted above. */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance the 12-bit sequence counter used above. */
		if(qos_actived)
		{
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	}else{
		/* Raw TX: skb already contains a complete 802.11 frame;
		 * wrap it in a single-fragment txb unchanged. */
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
	/* Fill the driver hint block (cb_desc) of the first fragment:
	 * rate selection, aggregation, GI, bandwidth, protection, seqnum. */
	if (txb)
	{
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
		ieee80211_txrate_selectmode(ieee, tcb_desc);
		if ( tcb_desc->bMulticast || tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);

		/* DHCP/ARP: force 1M with no fallback so association-critical
		 * traffic gets through. */
		if(bdhcp == true){
			tcb_desc->data_rate = MGN_1M;
			tcb_desc->bTxDisableRateFallBack = 1;

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
			tcb_desc->bdhcp = 1;
		}


		ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
		ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		ieee80211_query_HTCapShortGI(ieee, tcb_desc);
		ieee80211_query_BandwidthMode(ieee, tcb_desc);
		ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
		ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, ieee) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			ieee80211_txb_free(txb);
		}
	}

	return 0;

 failed:
	/* Allocation failed: stop the queue and ask the stack to requeue
	 * the skb (it is NOT freed on this path). */
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
956
957