1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/compiler.h>
19#include <linux/errno.h>
20#include <linux/if_arp.h>
21#include <linux/in6.h>
22#include <linux/in.h>
23#include <linux/ip.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/netdevice.h>
27#include <linux/pci.h>
28#include <linux/proc_fs.h>
29#include <linux/skbuff.h>
30#include <linux/slab.h>
31#include <linux/tcp.h>
32#include <linux/types.h>
33#include <linux/wireless.h>
34#include <linux/etherdevice.h>
35#include <linux/uaccess.h>
36#include <linux/if_vlan.h>
37
38#include "ieee80211.h"
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
139static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
140
141static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
142{
143 struct ieee80211_snap_hdr *snap;
144 u8 *oui;
145
146 snap = (struct ieee80211_snap_hdr *)data;
147 snap->dsap = 0xaa;
148 snap->ssap = 0xaa;
149 snap->ctrl = 0x03;
150
151 if (h_proto == 0x8137 || h_proto == 0x80f3)
152 oui = P802_1H_OUI;
153 else
154 oui = RFC1042_OUI;
155 snap->oui[0] = oui[0];
156 snap->oui[1] = oui[1];
157 snap->oui[2] = oui[2];
158
159 *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
160
161 return SNAP_SIZE + sizeof(u16);
162}
163
164int ieee80211_encrypt_fragment(
165 struct ieee80211_device *ieee,
166 struct sk_buff *frag,
167 int hdr_len)
168{
169 struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
170 int res;
171
172 if (!(crypt && crypt->ops)) {
173 printk("=========>%s(), crypt is null\n", __func__);
174 return -1;
175 }
176
177 if (ieee->tkip_countermeasures &&
178 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
179 if (net_ratelimit()) {
180 struct rtl_80211_hdr_3addrqos *header;
181
182 header = (struct rtl_80211_hdr_3addrqos *)frag->data;
183 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
184 "TX packet to %pM\n",
185 ieee->dev->name, header->addr1);
186 }
187 return -1;
188 }
189
190
191
192
193
194
195
196
197
198 atomic_inc(&crypt->refcnt);
199 res = 0;
200 if (crypt->ops->encrypt_msdu)
201 res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
202 if (res == 0 && crypt->ops->encrypt_mpdu)
203 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
204
205 atomic_dec(&crypt->refcnt);
206 if (res < 0) {
207 printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
208 ieee->dev->name, frag->len);
209 ieee->ieee_stats.tx_discards++;
210 return -1;
211 }
212
213 return 0;
214}
215
216
/* Release a txb container. Only the container itself is freed here;
 * the fragment skbs are freed by whoever consumed them. */
void ieee80211_txb_free(struct ieee80211_txb *txb)
{
	/* kfree() tolerates NULL, so no explicit guard is required */
	kfree(txb);
}
EXPORT_SYMBOL(ieee80211_txb_free);
224
225static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
226 gfp_t gfp_mask)
227{
228 struct ieee80211_txb *txb;
229 int i;
230 txb = kmalloc(
231 sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
232 gfp_mask);
233 if (!txb)
234 return NULL;
235
236 memset(txb, 0, sizeof(struct ieee80211_txb));
237 txb->nr_frags = nr_frags;
238 txb->frag_size = __cpu_to_le16(txb_size);
239
240 for (i = 0; i < nr_frags; i++) {
241 txb->fragments[i] = dev_alloc_skb(txb_size);
242 if (unlikely(!txb->fragments[i])) {
243 i--;
244 break;
245 }
246 memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
247 }
248 if (unlikely(i != nr_frags)) {
249 while (i >= 0)
250 dev_kfree_skb_any(txb->fragments[i--]);
251 kfree(txb);
252 return NULL;
253 }
254 return txb;
255}
256
257
258
259static int
260ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
261{
262 struct ethhdr *eth;
263 struct iphdr *ip;
264 eth = (struct ethhdr *)skb->data;
265 if (eth->h_proto != htons(ETH_P_IP))
266 return 0;
267
268 ip = ip_hdr(skb);
269 switch (ip->tos & 0xfc) {
270 case 0x20:
271 return 2;
272 case 0x40:
273 return 1;
274 case 0x60:
275 return 3;
276 case 0x80:
277 return 4;
278 case 0xa0:
279 return 5;
280 case 0xc0:
281 return 6;
282 case 0xe0:
283 return 7;
284 default:
285 return 0;
286 }
287}
288
/*
 * Decide whether this QoS data frame may be sent inside an A-MPDU
 * aggregate and fill the aggregation fields of @tcb_desc. Starts the
 * ADDBA handshake for the traffic stream when no admitted block-ack
 * agreement exists yet; forced AMPDU settings override the result.
 */
static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
		struct sk_buff *skb, struct cb_desc *tcb_desc)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtl_80211_hdr_1addr *hdr = (struct rtl_80211_hdr_1addr *)skb->data;

	/* Aggregation requires an active HT link ... */
	if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
		return;
	/* ... a QoS data frame ... */
	if (!IsQoSDataFrame(skb->data))
		return;
	/* ... and a unicast destination */
	if (is_multicast_ether_addr(hdr->addr1))
		return;

#ifdef TO_DO_LIST
	if (pTcb->PacketLength >= 4096)
		return;

	if (!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
		return;
#endif
	/* Security configuration may veto N-mode (and thus aggregation) */
	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
		return;
	}
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true)) {
			printk("===>can't get TS\n");
			return;
		}
		if (!pTxTs->tx_admitted_ba_record.valid) {
			/* No admitted BA agreement yet: kick off ADDBA and
			 * fall through to the forced-mode handling. */
			TsStartAddBaProcess(ieee, pTxTs);
			goto FORCED_AGG_SETTING;
		} else if (!pTxTs->using_ba) {
			/* Start using the BA only once its starting sequence
			 * number is ahead of the next frame's sequence. */
			if (SN_LESS(pTxTs->tx_admitted_ba_record.start_seq_ctrl.field.seq_num, (pTxTs->tx_cur_seq + 1) % 4096))
				pTxTs->using_ba = true;
			else
				goto FORCED_AGG_SETTING;
		}

		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	/* A forced mode overrides whatever was decided above */
	switch (pHTInfo->ForcedAMPDUMode )
	{
	case HT_AGG_AUTO:
		break;

	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;

	}
	return;
}
356
357static void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee,
358 struct cb_desc *tcb_desc)
359{
360 tcb_desc->bUseShortPreamble = false;
361 if (tcb_desc->data_rate == 2) {
362 return;
363 } else if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
364 tcb_desc->bUseShortPreamble = true;
365 }
366 return;
367}
368static void
369ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, struct cb_desc *tcb_desc)
370{
371 PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
372
373 tcb_desc->bUseShortGI = false;
374
375 if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
376 return;
377
378 if (pHTInfo->bForcedShortGI) {
379 tcb_desc->bUseShortGI = true;
380 return;
381 }
382
383 if ((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz)
384 tcb_desc->bUseShortGI = true;
385 else if ((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz)
386 tcb_desc->bUseShortGI = true;
387}
388
389static void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee,
390 struct cb_desc *tcb_desc)
391{
392 PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
393
394 tcb_desc->bPacketBW = false;
395
396 if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
397 return;
398
399 if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
400 return;
401
402 if ((tcb_desc->data_rate & 0x80)==0)
403 return;
404
405 if(pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
406 tcb_desc->bPacketBW = true;
407 return;
408}
409
410static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
411 struct cb_desc *tcb_desc,
412 struct sk_buff *skb)
413{
414
415 tcb_desc->bRTSSTBC = false;
416 tcb_desc->bRTSUseShortGI = false;
417 tcb_desc->bCTSEnable = false;
418 tcb_desc->RTSSC = 0;
419 tcb_desc->bRTSBW = false;
420
421 if(tcb_desc->bBroadcast || tcb_desc->bMulticast)
422 return;
423
424 if (is_broadcast_ether_addr(skb->data+16))
425 return;
426
427 if (ieee->mode < IEEE_N_24G)
428 {
429
430
431
432
433 if (skb->len > ieee->rts)
434 {
435 tcb_desc->bRTSEnable = true;
436 tcb_desc->rts_rate = MGN_24M;
437 }
438 else if (ieee->current_network.buseprotection)
439 {
440
441 tcb_desc->bRTSEnable = true;
442 tcb_desc->bCTSEnable = true;
443 tcb_desc->rts_rate = MGN_24M;
444 }
445
446 return;
447 }
448 else
449 {
450 PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
451 while (true)
452 {
453
454 if (ieee->current_network.buseprotection)
455 {
456 tcb_desc->bRTSEnable = true;
457 tcb_desc->bCTSEnable = true;
458 tcb_desc->rts_rate = MGN_24M;
459 break;
460 }
461
462 if(pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT)
463 {
464 u8 HTOpMode = pHTInfo->CurrentOpMode;
465 if((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
466 (!pHTInfo->bCurBW40MHz && HTOpMode == 3) )
467 {
468 tcb_desc->rts_rate = MGN_24M;
469 tcb_desc->bRTSEnable = true;
470 break;
471 }
472 }
473
474 if (skb->len > ieee->rts)
475 {
476 tcb_desc->rts_rate = MGN_24M;
477 tcb_desc->bRTSEnable = true;
478 break;
479 }
480
481
482 if(tcb_desc->bAMPDUEnable)
483 {
484 tcb_desc->rts_rate = MGN_24M;
485
486
487 tcb_desc->bRTSEnable = false;
488 break;
489 }
490
491 if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF)
492 {
493 tcb_desc->bCTSEnable = true;
494 tcb_desc->rts_rate = MGN_24M;
495 tcb_desc->bRTSEnable = true;
496 break;
497 }
498
499 goto NO_PROTECTION;
500 }
501 }
502
503 if (0) {
504 tcb_desc->bCTSEnable = true;
505 tcb_desc->rts_rate = MGN_24M;
506 tcb_desc->bRTSEnable = true;
507 }
508 if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
509 tcb_desc->bUseShortPreamble = true;
510 if (ieee->mode == IW_MODE_MASTER)
511 goto NO_PROTECTION;
512 return;
513NO_PROTECTION:
514 tcb_desc->bRTSEnable = false;
515 tcb_desc->bCTSEnable = false;
516 tcb_desc->rts_rate = 0;
517 tcb_desc->RTSSC = 0;
518 tcb_desc->bRTSBW = false;
519}
520
521
/*
 * Copy the device-level rate-control override flags into the packet's
 * control descriptor and select the RATR table index.
 */
static void ieee80211_txrate_selectmode(struct ieee80211_device *ieee,
					struct cb_desc *tcb_desc)
{
#ifdef TO_DO_LIST
	if (!IsDataFrame(pFrame)) {
		pTcb->bTxDisableRateFallBack = true;
		pTcb->bTxUseDriverAssingedRate = true;
		pTcb->RATRIndex = 7;
		return;
	}

	if (pMgntInfo->ForcedDataRate!= 0) {
		pTcb->bTxDisableRateFallBack = true;
		pTcb->bTxUseDriverAssingedRate = true;
		return;
	}
#endif
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	/* NOTE(review): with `||` this branch runs unless BOTH overrides
	 * are set; `&&` (either override unset) may have been intended --
	 * confirm against the vendor code before changing. */
	if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate)
	{
		if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}
550
551static void ieee80211_query_seqnum(struct ieee80211_device *ieee,
552 struct sk_buff *skb, u8 *dst)
553{
554 if (is_multicast_ether_addr(dst))
555 return;
556 if (IsQoSDataFrame(skb->data))
557 {
558 struct tx_ts_record *pTS = NULL;
559 if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true))
560 {
561 return;
562 }
563 pTS->tx_cur_seq = (pTS->tx_cur_seq + 1) % 4096;
564 }
565}
566
/*
 * TX entry point: turn an Ethernet-framed skb from the network stack
 * into an ieee80211_txb of 802.11 fragments (SNAP-encapsulated,
 * optionally encrypted), fill the per-fragment control descriptors,
 * then hand the txb to the softmac queue or the driver's
 * hard_start_xmit. Returns 0 on success or drop, 1 on allocation
 * failure (the queue is stopped in that case).
 */
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct rtl_80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtl_80211_hdr_3addrqos header = { /* ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;

	struct ieee80211_crypt_data *crypt;

	struct cb_desc *tcb_desc;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no handler to take the resulting txb, there is no
	 * point building it -- drop the frame. */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	/* Normal path: skb carries an Ethernet frame to be translated */
	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		/* EAPOL frames bypass encryption during 802.1X handshakes */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		/* Save source and destination MAC addresses */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		/* Advance the skb past the Ethernet header to the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Total payload: data plus the SNAP/EtherType encapsulation */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
		else

			fc = IEEE80211_FTYPE_DATA;

		/* QoS networks use the QoS-data subtype */
		if(qos_actived)
			fc |= IEEE80211_STYPE_QOS_DATA;
		else
			fc |= IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: addr1 = BSSID, addr2 = SA, addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* IBSS: addr1 = DA, addr2 = SA, addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Group-addressed frames are never fragmented and are not
		 * acknowledged */
		if (is_multicast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		/* QoS frames carry a 2-byte QoS control field */
		if (qos_actived) {
			hdr_len = IEEE80211_3ADDR_LEN + 2;

			skb->priority = ieee80211_classify(skb, &ieee->current_network);
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}

		/* Payload space per fragment: fragment size minus the
		 * 802.11 header, and minus FCS space if the stack computes
		 * or reserves it. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		/* Each fragment also needs room for the cipher's IV/ICV */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* Number of fragments and size of the final (partial) one */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* Each fragment skb is sized for the full fragment plus the
		 * driver's requested headroom */
		txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = __cpu_to_le16(bytes);

		/* Map the 802.1d priority to a WMM access category queue */
		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BK;

		/* Build each fragment: header, SNAP (first only), payload */
		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
			if(qos_actived){
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BK;
				tcb_desc->queue_index = WME_AC_BK;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				/* leave room for the cipher's IV prefix */
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

			/* All fragments but the last set MOREFRAGS and carry
			 * a full payload */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment takes the remainder */
				bytes = bytes_last_frag;
			}

			/* seq_ctl: sequence number in the upper 12 bits,
			 * fragment number in the lower 4 */
			if(qos_actived)
			{
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			/* Only the first fragment carries the SNAP header */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			skb_put_data(skb_frag, skb->data, bytes);

			/* Advance the skb past the consumed payload */
			skb_pull(skb, bytes);

			/* Encryption inserts the IV between header and
			 * payload; the return value is ignored here --
			 * the fragment is sent regardless. */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance the 12-bit sequence counter for the used queue */
		if(qos_actived)
		{
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		/* raw_tx: the skb already holds a complete 802.11 frame;
		 * wrap it in a single-fragment txb unchanged. */
		if (unlikely(skb->len < sizeof(struct rtl_80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = __cpu_to_le16(skb->len);
		skb_put_data(txb->fragments[0], skb->data, skb->len);
	}

 success:
	/* Fill the control descriptor of the first fragment and run the
	 * rate / aggregation / protection queries against it */
	if (txb)
	{
		struct cb_desc *tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
		ieee80211_txrate_selectmode(ieee, tcb_desc);
		if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
		ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
		ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		ieee80211_query_HTCapShortGI(ieee, tcb_desc);
		ieee80211_query_BandwidthMode(ieee, tcb_desc);
		ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
		ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += __le16_to_cpu(txb->payload_size);
				return 0;
			}
			ieee80211_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
886