1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/compiler.h>
35
36#include <linux/errno.h>
37#include <linux/if_arp.h>
38#include <linux/in6.h>
39#include <linux/in.h>
40#include <linux/ip.h>
41#include <linux/kernel.h>
42#include <linux/module.h>
43#include <linux/netdevice.h>
44#include <linux/pci.h>
45#include <linux/proc_fs.h>
46#include <linux/skbuff.h>
47#include <linux/slab.h>
48#include <linux/tcp.h>
49#include <linux/types.h>
50#include <linux/wireless.h>
51#include <linux/etherdevice.h>
52#include <asm/uaccess.h>
53#include <linux/if_vlan.h>
54
55#include "ieee80211.h"
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
/* SNAP OUIs used when re-encapsulating ethernet frames for 802.11:
 * 802.1H bridge-tunnel encapsulation (selected for IPX/AppleTalk
 * ethertypes in ieee80211_put_snap) and RFC 1042 for everything else. */
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
157
158static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
159{
160 struct ieee80211_snap_hdr *snap;
161 u8 *oui;
162
163 snap = (struct ieee80211_snap_hdr *)data;
164 snap->dsap = 0xaa;
165 snap->ssap = 0xaa;
166 snap->ctrl = 0x03;
167
168 if (h_proto == 0x8137 || h_proto == 0x80f3)
169 oui = P802_1H_OUI;
170 else
171 oui = RFC1042_OUI;
172 snap->oui[0] = oui[0];
173 snap->oui[1] = oui[1];
174 snap->oui[2] = oui[2];
175
176 *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
177
178 return SNAP_SIZE + sizeof(u16);
179}
180
181int ieee80211_encrypt_fragment(
182 struct ieee80211_device *ieee,
183 struct sk_buff *frag,
184 int hdr_len)
185{
186 struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
187 int res;
188
189
190 if (!crypt || !crypt->ops)
191 return -1;
192
193#ifdef CONFIG_IEEE80211_CRYPT_TKIP
194 struct ieee80211_hdr_4addr *header;
195
196 if (ieee->tkip_countermeasures &&
197 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
198 header = (struct ieee80211_hdr_4addr *)frag->data;
199 if (net_ratelimit()) {
200 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
201 "TX packet to %pM\n",
202 ieee->dev->name, header->addr1);
203 }
204 return -1;
205 }
206#endif
207
208
209
210
211
212
213 atomic_inc(&crypt->refcnt);
214 res = 0;
215 if (crypt->ops->encrypt_msdu)
216 res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
217 if (res == 0 && crypt->ops->encrypt_mpdu)
218 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
219
220 atomic_dec(&crypt->refcnt);
221 if (res < 0) {
222 printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
223 ieee->dev->name, frag->len);
224 ieee->ieee_stats.tx_discards++;
225 return -1;
226 }
227
228 return 0;
229}
230
231
232void ieee80211_txb_free(struct ieee80211_txb *txb) {
233 int i;
234 if (unlikely(!txb))
235 return;
236 for (i = 0; i < txb->nr_frags; i++)
237 if (txb->fragments[i])
238 dev_kfree_skb_any(txb->fragments[i]);
239 kfree(txb);
240}
241
242struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
243 int gfp_mask)
244{
245 struct ieee80211_txb *txb;
246 int i;
247 txb = kmalloc(
248 sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags),
249 gfp_mask);
250 if (!txb)
251 return NULL;
252
253 memset(txb, 0, sizeof(struct ieee80211_txb));
254 txb->nr_frags = nr_frags;
255 txb->frag_size = txb_size;
256
257 for (i = 0; i < nr_frags; i++) {
258 txb->fragments[i] = dev_alloc_skb(txb_size);
259 if (unlikely(!txb->fragments[i])) {
260 i--;
261 break;
262 }
263 }
264 if (unlikely(i != nr_frags)) {
265 while (i >= 0)
266 dev_kfree_skb_any(txb->fragments[i--]);
267 kfree(txb);
268 return NULL;
269 }
270 return txb;
271}
272
273
274
275static int
276ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
277{
278 struct ether_header *eh = (struct ether_header*)skb->data;
279 unsigned int wme_UP = 0;
280
281 if(!network->QoS_Enable) {
282 skb->priority = 0;
283 return(wme_UP);
284 }
285
286 if(eh->ether_type == __constant_htons(ETHERTYPE_IP)) {
287 const struct iphdr *ih = (struct iphdr*)(skb->data + \
288 sizeof(struct ether_header));
289 wme_UP = (ih->tos >> 5)&0x07;
290 } else if (skb_vlan_tag_present(skb)) {
291#ifndef VLAN_PRI_SHIFT
292#define VLAN_PRI_SHIFT 13
293#define VLAN_PRI_MASK 7
294#endif
295 u32 tag = skb_vlan_tag_get(skb);
296 wme_UP = (tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
297 } else if(ETH_P_PAE == ntohs(((struct ethhdr *)skb->data)->h_proto)) {
298
299 wme_UP = 7;
300 }
301
302 skb->priority = wme_UP;
303 return(wme_UP);
304}
305
306
/*
 * net_device transmit entry point for the ieee80211 stack.
 *
 * Converts an outgoing ethernet frame into one or more 802.11 data
 * fragments collected in an ieee80211_txb, then hands the txb to the
 * softmac TX queue or to the driver's hard_start_xmit handler.  When
 * ieee->raw_tx is non-zero the payload is passed through untouched as
 * a single fragment.  The incoming skb is always consumed.
 *
 * Returns NETDEV_TX_OK on success/drop, NETDEV_TX_BUSY on allocation
 * failure (queue is stopped and the skb is left for requeue).
 */
int ieee80211_rtl_xmit(struct sk_buff *skb,
		       struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type, encrypt;
	int bytes, fc, qos_ctl, hdr_len;
	struct sk_buff *skb_frag;
	/* Template 802.11 header, copied into every fragment below. */
	struct ieee80211_hdr_3addrqos header = {
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];

	struct ieee80211_crypt_data* crypt;

	/* Everything up to the success/failed labels runs under the
	 * device lock with interrupts disabled. */
	spin_lock_irqsave(&ieee->lock, flags);

	/* No way to deliver the txb: neither a hardware xmit handler nor
	 * (in softmac mode) a data xmit handler.  Drop via the "success"
	 * path so the skb is still freed. */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	/* Sets skb->priority from TOS/VLAN/EAPOL classification. */
	ieee80211_classify(skb,&ieee->current_network);
	if(likely(ieee->raw_tx == 0)){

		/* Frame must at least hold room for the SNAP header we
		 * will prepend to the payload. */
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		/* Host encryption applies unless this is an EAPOL frame
		 * during 802.1X authentication (sent in the clear). */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		/* 802.1X with drop_unencrypted: discard any non-EAPOL
		 * frame that would otherwise go out unencrypted. */
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}

	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		/* Save ethernet addresses before stripping the header. */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		/* Remove the ethernet header; only the payload remains. */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Total 802.11 payload = data + SNAP header + ethertype. */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		/* Build the frame-control field: QoS-data vs plain data
		 * subtype, plus the WEP bit when host-encrypting. */
		if(ieee->current_network.QoS_Enable) {
			if (encrypt)
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA |
					IEEE80211_FCTL_WEP;
			else
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;

		} else {
			if (encrypt)
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
					IEEE80211_FCTL_WEP;
			else
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
		}

		/* Address layout depends on operating mode:
		 * infrastructure (ToDS): addr1=BSSID, addr2=SA, addr3=DA;
		 * ad-hoc: addr1=DA, addr2=SA, addr3=BSSID. */
		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;

			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {

			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Multicast/broadcast frames are never fragmented and are
		 * not acknowledged; unicast uses the configured
		 * fragmentation threshold. */
		if (is_multicast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl = QOS_CTL_NOTCONTAIN_ACK;
		}
		else {

			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		/* QoS frames carry a 2-byte QoS-control field after the
		 * 3-address header. */
		if (ieee->current_network.QoS_Enable) {
			hdr_len = IEEE80211_3ADDR_LEN + 2;
			/* Low bits of qos_ctl hold the user priority. */
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}

		/* Per-fragment payload capacity: threshold minus the
		 * 802.11 header, FCS (if we provide it), and any crypto
		 * prefix/postfix overhead. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* Number of fragments and the size of the last one. */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* One skb per fragment, each large enough for header +
		 * payload + crypto overhead. */
		txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			skb_frag->priority = UP2AC(skb->priority);
			if (encrypt)
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);

			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* All fragments except the last get More-Frag and a
			 * full payload. */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {

				bytes = bytes_last_frag;
			}
			/* Sequence number per QoS access class (offset by 1;
			 * slot 0 is the non-QoS counter), fragment number in
			 * the low 4 bits. */
			if(ieee->current_network.QoS_Enable) {

				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);

			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			/* The first fragment starts with the SNAP header
			 * carrying the original ethertype. */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the source payload past what we copied. */
			skb_pull(skb, bytes);

			/* Encrypt in place; header stays cleartext. */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance the 12-bit sequence counter for the class used. */
		if (ieee->current_network.QoS_Enable) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}

	}else{
		/* Raw TX: the caller supplies a complete 802.11 frame;
		 * pass it through as a single unencrypted fragment. */
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
	/* Original skb is consumed whether or not a txb was built. */
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			/* Softmac queue takes ownership of the txb. */
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return NETDEV_TX_OK;
			}
			/* Driver rejected the txb: free it, still OK. */
			ieee80211_txb_free(txb);
		}
	}

	return NETDEV_TX_OK;

 failed:
	/* Allocation failure: stop the queue and ask the core to
	 * requeue this skb (it is NOT freed here). */
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return NETDEV_TX_BUSY;

}
581