#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/if_vlan.h>

#include "ieee80211.h"
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
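/*
 * Write an 802.2 LLC/SNAP header followed by the ethertype at "data".
 * IPX (0x8137) and AppleTalk AARP (0x80f3) frames use the 802.1H OUI,
 * everything else uses the RFC 1042 OUI.  Returns the number of bytes
 * written (SNAP_SIZE plus the two ethertype bytes).
 */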
static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
{
        struct ieee80211_snap_hdr *snap;
        u8 *oui;

        snap = (struct ieee80211_snap_hdr *)data;
        snap->dsap = 0xaa;
        snap->ssap = 0xaa;
        snap->ctrl = 0x03;

        if (h_proto == 0x8137 || h_proto == 0x80f3)
                oui = P802_1H_OUI;
        else
                oui = RFC1042_OUI;
        snap->oui[0] = oui[0];
        snap->oui[1] = oui[1];
        snap->oui[2] = oui[2];

        *(u16 *)(data + SNAP_SIZE) = htons(h_proto);

        return SNAP_SIZE + sizeof(u16);
}
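/*
 * Encrypt a single fragment in place with the currently selected TX key.
 * MSDU-level encryption (e.g. the TKIP MIC) is applied first, then
 * MPDU-level encryption.  Returns 0 on success and -1 if no crypto ops
 * are configured, TKIP countermeasures are in effect, or encryption fails.
 */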
int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
                               struct sk_buff *frag, int hdr_len)
{
        struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
        int res;

        if (!crypt || !crypt->ops) {
                printk(KERN_WARNING "%s: crypt is null\n", __func__);
                return -1;
        }

#ifdef CONFIG_IEEE80211_CRYPT_TKIP
        if (ieee->tkip_countermeasures &&
            strcmp(crypt->ops->name, "TKIP") == 0) {
                struct rtl_ieee80211_hdr *header =
                        (struct rtl_ieee80211_hdr *)frag->data;

                if (net_ratelimit()) {
                        printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
                               "TX packet to " MAC_FMT "\n",
                               ieee->dev->name, MAC_ARG(header->addr1));
                }
                return -1;
        }
#endif

        atomic_inc(&crypt->refcnt);
        res = 0;
        if (crypt->ops->encrypt_msdu)
                res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
        if (res == 0 && crypt->ops->encrypt_mpdu)
                res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

        atomic_dec(&crypt->refcnt);
        if (res < 0) {
                printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
                       ieee->dev->name, frag->len);
                ieee->ieee_stats.tx_discards++;
                return -1;
        }

        return 0;
}
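/*
 * Free a TX block descriptor.  Only the txb structure itself is released;
 * the fragment skbs are expected to have been consumed (and freed)
 * elsewhere by the time this is called.
 */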
void ieee80211_txb_free(struct ieee80211_txb *txb)
{
        if (unlikely(!txb))
                return;
        kfree(txb);
}
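/*
 * Allocate a TX block with nr_frags fragment skbs of txb_size bytes each.
 * On any allocation failure everything allocated so far is freed and NULL
 * is returned.
 */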
struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
                                          int gfp_mask)
{
        struct ieee80211_txb *txb;
        int i;

        txb = kmalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
                      gfp_mask);
        if (!txb)
                return NULL;

        memset(txb, 0, sizeof(struct ieee80211_txb));
        txb->nr_frags = nr_frags;
        txb->frag_size = txb_size;

        for (i = 0; i < nr_frags; i++) {
                txb->fragments[i] = dev_alloc_skb(txb_size);
                if (unlikely(!txb->fragments[i])) {
                        i--;
                        break;
                }
                memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
        }
        if (unlikely(i != nr_frags)) {
                while (i >= 0)
                        dev_kfree_skb_any(txb->fragments[i--]);
                kfree(txb);
                return NULL;
        }
        return txb;
}
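/*
 * Map an outgoing IPv4 packet to an 802.11e user priority (0-7) based on
 * the IP TOS field.  Non-IP traffic gets priority 0 (best effort).
 */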
static int
ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
{
        struct ethhdr *eth;
        struct iphdr *ip;

        eth = (struct ethhdr *)skb->data;
        if (eth->h_proto != htons(ETH_P_IP))
                return 0;

        ip = ip_hdr(skb);

        switch (ip->tos & 0xfc) {
        case 0x20:
                return 2;
        case 0x40:
                return 1;
        case 0x60:
                return 3;
        case 0x80:
                return 4;
        case 0xa0:
                return 5;
        case 0xc0:
                return 6;
        case 0xe0:
                return 7;
        default:
                return 0;
        }
}
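/*
 * Decide whether this QoS data frame may be sent as part of an A-MPDU
 * aggregate.  Aggregation is only considered for unicast QoS data on an
 * HT connection; if the corresponding TS has no admitted block-ack
 * agreement yet, an ADDBA exchange is started instead.  A forced A-MPDU
 * mode (pHTInfo->ForcedAMPDUMode) overrides the result.
 */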
void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
                                struct sk_buff *skb, cb_desc *tcb_desc)
{
        PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
        PTX_TS_RECORD pTxTs = NULL;
        struct ieee80211_hdr_1addr *hdr = (struct ieee80211_hdr_1addr *)skb->data;

        if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
                return;
        if (!IsQoSDataFrame(skb->data))
                return;

        if (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr1))
                return;

#ifdef TO_DO_LIST
        if (pTcb->PacketLength >= 4096)
                return;

        if (!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
                return;
#endif

        if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
                return;

        if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
                return;

        if (pHTInfo->bCurrentAMPDUEnable) {
                if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTxTs), hdr->addr1,
                           skb->priority, TX_DIR, true)) {
                        printk(KERN_DEBUG "%s: can't get TS\n", __func__);
                        return;
                }
                if (!pTxTs->TxAdmittedBARecord.bValid) {
                        if (!(ieee->wpa_ie_len && ieee->pairwise_key_type == KEY_TYPE_NA))
                                TsStartAddBaProcess(ieee, pTxTs);
                        goto FORCED_AGG_SETTING;
                } else if (pTxTs->bUsingBa == false) {
                        if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
                                    (pTxTs->TxCurSeq + 1) % 4096))
                                pTxTs->bUsingBa = true;
                        else
                                goto FORCED_AGG_SETTING;
                }

                if (ieee->iw_mode == IW_MODE_INFRA) {
                        tcb_desc->bAMPDUEnable = true;
                        tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
                        tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
                }
        }
FORCED_AGG_SETTING:
        switch (pHTInfo->ForcedAMPDUMode) {
        case HT_AGG_AUTO:
                break;

        case HT_AGG_FORCE_ENABLE:
                tcb_desc->bAMPDUEnable = true;
                tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
                tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
                break;

        case HT_AGG_FORCE_DISABLE:
                tcb_desc->bAMPDUEnable = false;
                tcb_desc->ampdu_density = 0;
                tcb_desc->ampdu_factor = 0;
                break;
        }
}
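/*
 * Select long or short PLCP preamble for this frame.  The 1 Mb/s rate
 * (data_rate == 2, in units of 0.5 Mb/s) always uses the long preamble;
 * otherwise short preamble is used when the network advertises
 * WLAN_CAPABILITY_SHORT_PREAMBLE.
 */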
void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee,
                                       cb_desc *tcb_desc)
{
        tcb_desc->bUseShortPreamble = false;
        if (tcb_desc->data_rate == 2)
                return;
        else if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
                tcb_desc->bUseShortPreamble = true;
}
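/*
 * Enable the short guard interval when HT is active and the current
 * bandwidth (20 or 40 MHz) supports it, or when short GI is forced.
 */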
void ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
        PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

        tcb_desc->bUseShortGI = false;

        if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
                return;

        if (pHTInfo->bForcedShortGI) {
                tcb_desc->bUseShortGI = true;
                return;
        }

        if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
                tcb_desc->bUseShortGI = true;
        else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
                tcb_desc->bUseShortGI = true;
}
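/*
 * Use a 40 MHz transmission only for unicast HT (MCS, data_rate & 0x80)
 * rates when both the current channel and the TX path are configured for
 * 40 MHz and no forced 20 MHz fallback is active.
 */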
void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
        PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

        tcb_desc->bPacketBW = false;

        if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
                return;

        if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
                return;

        if ((tcb_desc->data_rate & 0x80) == 0)
                return;

        if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
            !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
                tcb_desc->bPacketBW = true;
}
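/*
 * Choose the RTS/CTS protection scheme for this frame.  Legacy (pre-N)
 * modes use plain RTS above the RTS threshold or RTS/CTS when the network
 * asks for protection; in N mode the decision also takes IOT workarounds,
 * the HT operation mode and A-MPDU aggregation into account.
 * Multicast/broadcast frames are never protected.
 */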
void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
                                    cb_desc *tcb_desc, struct sk_buff *skb)
{
        tcb_desc->bRTSSTBC = false;
        tcb_desc->bRTSUseShortGI = false;
        tcb_desc->bCTSEnable = false;
        tcb_desc->RTSSC = 0;
        tcb_desc->bRTSBW = false;

        if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
                return;

        if (is_broadcast_ether_addr(skb->data + 16))
                return;

        if (ieee->mode < IEEE_N_24G) {
                if (skb->len > ieee->rts) {
                        tcb_desc->bRTSEnable = true;
                        tcb_desc->rts_rate = MGN_24M;
                } else if (ieee->current_network.buseprotection) {
                        tcb_desc->bRTSEnable = true;
                        tcb_desc->bCTSEnable = true;
                        tcb_desc->rts_rate = MGN_24M;
                }
                return;
        } else {
                PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

                while (true) {
                        if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
                                tcb_desc->bCTSEnable = true;
                                tcb_desc->rts_rate = MGN_24M;
                                tcb_desc->bRTSEnable = false;
                                break;
                        } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS | HT_IOT_ACT_PURE_N_MODE)) {
                                tcb_desc->bRTSEnable = true;
                                tcb_desc->rts_rate = MGN_24M;
                                break;
                        }

                        if (ieee->current_network.buseprotection) {
                                tcb_desc->bRTSEnable = true;
                                tcb_desc->bCTSEnable = true;
                                tcb_desc->rts_rate = MGN_24M;
                                break;
                        }

                        if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
                                u8 HTOpMode = pHTInfo->CurrentOpMode;

                                if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
                                    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
                                        tcb_desc->rts_rate = MGN_24M;
                                        tcb_desc->bRTSEnable = true;
                                        break;
                                }
                        }

                        if (skb->len > ieee->rts) {
                                tcb_desc->rts_rate = MGN_24M;
                                tcb_desc->bRTSEnable = true;
                                break;
                        }

                        if (tcb_desc->bAMPDUEnable) {
                                tcb_desc->rts_rate = MGN_24M;
                                tcb_desc->bRTSEnable = false;
                                break;
                        }

                        goto NO_PROTECTION;
                }
        }

        if (0) {
                tcb_desc->bCTSEnable = true;
                tcb_desc->rts_rate = MGN_24M;
                tcb_desc->bRTSEnable = true;
        }
        if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
                tcb_desc->bUseShortPreamble = true;
        if (ieee->mode == IW_MODE_MASTER)
                goto NO_PROTECTION;
        return;
NO_PROTECTION:
        tcb_desc->bRTSEnable = false;
        tcb_desc->bCTSEnable = false;
        tcb_desc->rts_rate = 0;
        tcb_desc->RTSSC = 0;
        tcb_desc->bRTSBW = false;
}
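/*
 * Propagate the driver's rate-fallback / fixed-rate policy into the TX
 * descriptor and pick the RATR table index for infrastructure and
 * ad-hoc mode.
 */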
void ieee80211_txrate_selectmode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
#ifdef TO_DO_LIST
        if (!IsDataFrame(pFrame)) {
                pTcb->bTxDisableRateFallBack = TRUE;
                pTcb->bTxUseDriverAssingedRate = TRUE;
                pTcb->RATRIndex = 7;
                return;
        }

        if (pMgntInfo->ForcedDataRate != 0) {
                pTcb->bTxDisableRateFallBack = TRUE;
                pTcb->bTxUseDriverAssingedRate = TRUE;
                return;
        }
#endif
        if (ieee->bTxDisableRateFallBack)
                tcb_desc->bTxDisableRateFallBack = true;

        if (ieee->bTxUseDriverAssingedRate)
                tcb_desc->bTxUseDriverAssingedRate = true;
        if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate) {
                if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
                        tcb_desc->RATRIndex = 0;
        }
}
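/*
 * Advance the per-TS transmit sequence number for unicast QoS data frames.
 */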
void ieee80211_query_seqnum(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *dst)
{
        if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst))
                return;
        if (IsQoSDataFrame(skb->data)) {
                PTX_TS_RECORD pTS = NULL;

                if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTS), dst,
                           skb->priority, TX_DIR, true))
                        return;
                pTS->TxCurSeq = (pTS->TxCurSeq + 1) % 4096;
        }
}
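/*
 * rtl8192_ieee80211_xmit - 802.3 to 802.11 transmit path
 *
 * Converts an Ethernet frame from the network stack into one or more
 * 802.11 (QoS) data fragments: the Ethernet header is replaced by an
 * 802.11 header plus LLC/SNAP encapsulation, the payload is split
 * according to the fragmentation threshold, each fragment is optionally
 * encrypted, and the resulting txb is handed to the softmac queue or
 * directly to the driver's hard_start_xmit.  In raw TX mode the skb is
 * passed through as a single unmodified fragment.
 */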
int rtl8192_ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ieee80211_device *ieee = netdev_priv(dev);
        struct ieee80211_txb *txb = NULL;
        struct ieee80211_hdr_3addrqos *frag_hdr;
        int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
        unsigned long flags;
        struct net_device_stats *stats = &ieee->stats;
        int ether_type = 0, encrypt;
        int bytes, fc, qos_ctl = 0, hdr_len;
        struct sk_buff *skb_frag;
        struct ieee80211_hdr_3addrqos header = {
                .duration_id = 0,
                .seq_ctl = 0,
                .qos_ctl = 0
        };
        u8 dest[ETH_ALEN], src[ETH_ALEN];
        int qos_actived = ieee->current_network.qos_data.active;
        struct ieee80211_crypt_data *crypt;
        cb_desc *tcb_desc;

        spin_lock_irqsave(&ieee->lock, flags);

        if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) ||
            (!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))) {
                printk(KERN_WARNING "%s: No xmit handler.\n",
                       ieee->dev->name);
                goto success;
        }

        if (likely(ieee->raw_tx == 0)) {
                if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
                        printk(KERN_WARNING "%s: skb too small (%d).\n",
                               ieee->dev->name, skb->len);
                        goto success;
                }

                memset(skb->cb, 0, sizeof(skb->cb));
                ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

                crypt = ieee->crypt[ieee->tx_keyidx];

                encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
                          ieee->host_encrypt && crypt && crypt->ops;

                if (!encrypt && ieee->ieee802_1x &&
                    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
                        stats->tx_dropped++;
                        goto success;
                }
#ifdef CONFIG_IEEE80211_DEBUG
                if (crypt && !encrypt && ether_type == ETH_P_PAE) {
                        struct eapol *eap = (struct eapol *)(skb->data +
                                sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
                        IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
                                            eap_get_type(eap->type));
                }
#endif

                memcpy(&dest, skb->data, ETH_ALEN);
                memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);

                skb_pull(skb, sizeof(struct ethhdr));

                bytes = skb->len + SNAP_SIZE + sizeof(u16);

                if (encrypt)
                        fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
                else
                        fc = IEEE80211_FTYPE_DATA;

                if (qos_actived)
                        fc |= IEEE80211_STYPE_QOS_DATA;
                else
                        fc |= IEEE80211_STYPE_DATA;

                if (ieee->iw_mode == IW_MODE_INFRA) {
                        fc |= IEEE80211_FCTL_TODS;
                        memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
                        memcpy(&header.addr2, &src, ETH_ALEN);
                        memcpy(&header.addr3, &dest, ETH_ALEN);
                } else if (ieee->iw_mode == IW_MODE_ADHOC) {
                        memcpy(&header.addr1, dest, ETH_ALEN);
                        memcpy(&header.addr2, src, ETH_ALEN);
                        memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
                }

                header.frame_ctl = cpu_to_le16(fc);

                if (is_multicast_ether_addr(header.addr1) ||
                    is_broadcast_ether_addr(header.addr1)) {
                        frag_size = MAX_FRAG_THRESHOLD;
                        qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
                } else {
                        frag_size = ieee->fts;
                        qos_ctl = 0;
                }

                if (qos_actived) {
                        hdr_len = IEEE80211_3ADDR_LEN + 2;

                        skb->priority = ieee80211_classify(skb, &ieee->current_network);
                        qos_ctl |= skb->priority;
                        header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
                } else {
                        hdr_len = IEEE80211_3ADDR_LEN;
                }
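                /*
                 * Work out how much payload fits in each fragment: the
                 * fragment threshold minus the 802.11 header, minus the FCS
                 * if it is computed/reserved in software, minus the crypto
                 * prefix/postfix when host encryption is used.  The last
                 * fragment carries whatever remains.
                 */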
                bytes_per_frag = frag_size - hdr_len;
                if (ieee->config &
                    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                        bytes_per_frag -= IEEE80211_FCS_LEN;

                if (encrypt)
                        bytes_per_frag -= crypt->ops->extra_prefix_len +
                                          crypt->ops->extra_postfix_len;

                nr_frags = bytes / bytes_per_frag;
                bytes_last_frag = bytes % bytes_per_frag;
                if (bytes_last_frag)
                        nr_frags++;
                else
                        bytes_last_frag = bytes_per_frag;

                txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
                if (unlikely(!txb)) {
                        printk(KERN_WARNING "%s: Could not allocate TXB\n",
                               ieee->dev->name);
                        goto failed;
                }
                txb->encrypted = encrypt;
                txb->payload_size = bytes;

                if (qos_actived)
                        txb->queue_index = UP2AC(skb->priority);
                else
                        txb->queue_index = WME_AC_BK;
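                /*
                 * Build each fragment: reserve driver headroom and crypto
                 * prefix space, copy the 802.11 header (setting MOREFRAGS
                 * and the per-fragment sequence/fragment number), prepend
                 * the SNAP header on the first fragment, copy the payload
                 * slice and encrypt if required.
                 */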
                for (i = 0; i < nr_frags; i++) {
                        skb_frag = txb->fragments[i];
                        tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
                        if (qos_actived) {
                                skb_frag->priority = skb->priority;
                                tcb_desc->queue_index = UP2AC(skb->priority);
                        } else {
                                skb_frag->priority = WME_AC_BK;
                                tcb_desc->queue_index = WME_AC_BK;
                        }
                        skb_reserve(skb_frag, ieee->tx_headroom);

                        if (encrypt) {
                                if (ieee->hwsec_active)
                                        tcb_desc->bHwSec = 1;
                                else
                                        tcb_desc->bHwSec = 0;
                                skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
                        } else {
                                tcb_desc->bHwSec = 0;
                        }
                        frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
                        memcpy(frag_hdr, &header, hdr_len);

                        if (i != nr_frags - 1) {
                                frag_hdr->frame_ctl = cpu_to_le16(
                                        fc | IEEE80211_FCTL_MOREFRAGS);
                                bytes = bytes_per_frag;
                        } else {
                                bytes = bytes_last_frag;
                        }

                        if (qos_actived)
                                frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority) + 1] << 4 | i);
                        else
                                frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);

                        if (i == 0) {
                                ieee80211_put_snap(
                                        skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
                                        ether_type);
                                bytes -= SNAP_SIZE + sizeof(u16);
                        }

                        memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

                        skb_pull(skb, bytes);

                        if (encrypt)
                                ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
                        if (ieee->config &
                            (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
                                skb_put(skb_frag, 4);
                }

                if (qos_actived) {
                        if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
                                ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
                        else
                                ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
                } else {
                        if (ieee->seq_ctrl[0] == 0xFFF)
                                ieee->seq_ctrl[0] = 0;
                        else
                                ieee->seq_ctrl[0]++;
                }
        } else {
                if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
                        printk(KERN_WARNING "%s: skb too small (%d).\n",
                               ieee->dev->name, skb->len);
                        goto success;
                }

                txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
                if (!txb) {
                        printk(KERN_WARNING "%s: Could not allocate TXB\n",
                               ieee->dev->name);
                        goto failed;
                }

                txb->encrypted = 0;
                txb->payload_size = skb->len;
                memcpy(skb_put(txb->fragments[0], skb->len), skb->data, skb->len);
        }
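        /*
         * Both the normal and the raw path end up here with a txb (or NULL
         * after an early error).  Fill in the driver cb_desc of the first
         * fragment: rate selection, preamble, A-MPDU aggregation, short GI,
         * bandwidth, protection mode and TS sequence number bookkeeping.
         */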
 success:
        if (txb) {
                cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);

                tcb_desc->bTxEnableFwCalcDur = 1;
                if (is_multicast_ether_addr(header.addr1))
                        tcb_desc->bMulticast = 1;
                if (is_broadcast_ether_addr(header.addr1))
                        tcb_desc->bBroadcast = 1;
                ieee80211_txrate_selectmode(ieee, tcb_desc);
                if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
                        tcb_desc->data_rate = ieee->basic_rate;
                else
                        tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
                ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
                ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
                ieee80211_query_HTCapShortGI(ieee, tcb_desc);
                ieee80211_query_BandwidthMode(ieee, tcb_desc);
                ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
                ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
        }
        spin_unlock_irqrestore(&ieee->lock, flags);
        dev_kfree_skb_any(skb);
        if (txb) {
                if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
                        ieee80211_softmac_xmit(txb, ieee);
                } else {
                        if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
                                stats->tx_packets++;
                                stats->tx_bytes += txb->payload_size;
                                return 0;
                        }
                        ieee80211_txb_free(txb);
                }
        }

        return 0;

 failed:
        spin_unlock_irqrestore(&ieee->lock, flags);
        netif_stop_queue(dev);
        stats->tx_errors++;
        return 1;
}