1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/compiler.h>
35
36#include <linux/errno.h>
37#include <linux/if_arp.h>
38#include <linux/in6.h>
39#include <linux/in.h>
40#include <linux/ip.h>
41#include <linux/kernel.h>
42#include <linux/module.h>
43#include <linux/netdevice.h>
44#include <linux/pci.h>
45#include <linux/proc_fs.h>
46#include <linux/skbuff.h>
47#include <linux/slab.h>
48#include <linux/tcp.h>
49#include <linux/types.h>
50#include <linux/version.h>
51#include <linux/wireless.h>
52#include <linux/etherdevice.h>
53#include <asm/uaccess.h>
54#include <linux/if_vlan.h>
55
56#include "ieee80211.h"
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
/* SNAP OUIs: 802.1H (bridge-tunnel) encapsulation is used for the IPX and
 * AppleTalk AARP ethertypes; RFC 1042 encapsulation for everything else. */
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
158
159static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
160{
161 struct ieee80211_snap_hdr *snap;
162 u8 *oui;
163
164 snap = (struct ieee80211_snap_hdr *)data;
165 snap->dsap = 0xaa;
166 snap->ssap = 0xaa;
167 snap->ctrl = 0x03;
168
169 if (h_proto == 0x8137 || h_proto == 0x80f3)
170 oui = P802_1H_OUI;
171 else
172 oui = RFC1042_OUI;
173 snap->oui[0] = oui[0];
174 snap->oui[1] = oui[1];
175 snap->oui[2] = oui[2];
176
177 *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
178
179 return SNAP_SIZE + sizeof(u16);
180}
181
182int ieee80211_encrypt_fragment(
183 struct ieee80211_device *ieee,
184 struct sk_buff *frag,
185 int hdr_len)
186{
187 struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
188 int res;
189
190
191 if (!crypt || !crypt->ops)
192 return -1;
193
194#ifdef CONFIG_IEEE80211_CRYPT_TKIP
195 struct ieee80211_hdr_4addr *header;
196
197 if (ieee->tkip_countermeasures &&
198 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
199 header = (struct ieee80211_hdr_4addr *)frag->data;
200 if (net_ratelimit()) {
201 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
202 "TX packet to " MAC_FMT "\n",
203 ieee->dev->name, MAC_ARG(header->addr1));
204 }
205 return -1;
206 }
207#endif
208
209
210
211
212
213
214 atomic_inc(&crypt->refcnt);
215 res = 0;
216 if (crypt->ops->encrypt_msdu)
217 res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
218 if (res == 0 && crypt->ops->encrypt_mpdu)
219 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
220
221 atomic_dec(&crypt->refcnt);
222 if (res < 0) {
223 printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
224 ieee->dev->name, frag->len);
225 ieee->ieee_stats.tx_discards++;
226 return -1;
227 }
228
229 return 0;
230}
231
232
233void ieee80211_txb_free(struct ieee80211_txb *txb) {
234 int i;
235 if (unlikely(!txb))
236 return;
237 for (i = 0; i < txb->nr_frags; i++)
238 if (txb->fragments[i])
239 dev_kfree_skb_any(txb->fragments[i]);
240 kfree(txb);
241}
242
243struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
244 int gfp_mask)
245{
246 struct ieee80211_txb *txb;
247 int i;
248 txb = kmalloc(
249 sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags),
250 gfp_mask);
251 if (!txb)
252 return NULL;
253
254 memset(txb, 0, sizeof(struct ieee80211_txb));
255 txb->nr_frags = nr_frags;
256 txb->frag_size = txb_size;
257
258 for (i = 0; i < nr_frags; i++) {
259 txb->fragments[i] = dev_alloc_skb(txb_size);
260 if (unlikely(!txb->fragments[i])) {
261 i--;
262 break;
263 }
264 }
265 if (unlikely(i != nr_frags)) {
266 while (i >= 0)
267 dev_kfree_skb_any(txb->fragments[i--]);
268 kfree(txb);
269 return NULL;
270 }
271 return txb;
272}
273
274
275
276static int
277ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
278{
279 struct ether_header *eh = (struct ether_header*)skb->data;
280 unsigned int wme_UP = 0;
281
282 if(!network->QoS_Enable) {
283 skb->priority = 0;
284 return(wme_UP);
285 }
286
287 if(eh->ether_type == __constant_htons(ETHERTYPE_IP)) {
288 const struct iphdr *ih = (struct iphdr*)(skb->data + \
289 sizeof(struct ether_header));
290 wme_UP = (ih->tos >> 5)&0x07;
291 } else if (vlan_tx_tag_present(skb)) {
292#ifndef VLAN_PRI_SHIFT
293#define VLAN_PRI_SHIFT 13
294#define VLAN_PRI_MASK 7
295#endif
296 u32 tag = vlan_tx_tag_get(skb);
297 wme_UP = (tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
298 } else if(ETH_P_PAE == ntohs(((struct ethhdr *)skb->data)->h_proto)) {
299
300 wme_UP = 7;
301 }
302
303 skb->priority = wme_UP;
304 return(wme_UP);
305}
306
307
/* Convert an outgoing ethernet skb into an ieee80211_txb — an array of
 * 802.11 data fragments with LLC/SNAP encapsulation — and hand it to
 * the softmac queue or the driver's hard_start_xmit handler.
 *
 * The input @skb is always consumed.  ieee->lock is held while the txb
 * is built.  Returns NETDEV_TX_OK normally; NETDEV_TX_BUSY (with the
 * queue stopped) only when txb allocation fails.
 */
int ieee80211_xmit(struct sk_buff *skb,
		   struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type, encrypt;
	int bytes, fc, qos_ctl, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr_3addrqos header = { /* zero out unset fields */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];

	struct ieee80211_crypt_data* crypt;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no handler that can take the resulting TXB (neither
	 * a driver hard_start_xmit nor the softmac data path), don't
	 * bother building it — drop the frame via the success path. */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	ieee80211_classify(skb,&ieee->current_network);
	if(likely(ieee->raw_tx == 0)){
		/* Normal path: skb is an ethernet frame to be encapsulated. */
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		/* Host encryption applies unless this is an EAPOL frame
		 * during 802.1X authentication, or no cipher is set up. */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			/* 802.1X not yet complete: drop non-EAPOL plaintext. */
			stats->tx_dropped++;
			goto success;
		}

	#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif

		/* Save source and destination addresses before stripping
		 * the ethernet header. */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		/* Advance the skb to the start of the payload. */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Total payload to carry: data plus the LLC/SNAP header. */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if(ieee->current_network.QoS_Enable) {
			if (encrypt)
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA |
					IEEE80211_FCTL_WEP;
			else
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;

		} else {
			if (encrypt)
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
					IEEE80211_FCTL_WEP;
			else
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
		}

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: addr1 = BSSID, addr2 = SA, addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* Not to/from DS: addr1 = DA, addr2 = SA,
			 * addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination:
		 * multicast/broadcast frames must not be fragmented and
		 * are never acknowledged. */
		if (is_multicast_ether_addr(header.addr1) ||
		    is_broadcast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl = QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
			/* Unicast: use the configured fragment threshold. */
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (ieee->current_network.QoS_Enable) {
			/* QoS data frames carry a 2-byte QoS control field. */
			hdr_len = IEEE80211_3ADDR_LEN + 2;
			/* Fold the classified user priority into QoS control. */
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}

		/* How much payload fits in each fragment once the 802.11
		 * header (and optionally FCS and crypto overhead) are
		 * accounted for. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		/* Each fragment may need to have room for encryption
		 * pre/postfix. */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* Number of fragments needed; the last one carries the
		 * remainder (or a full fragment if it divides evenly). */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for
		 * the reserve_fcs bit already in the skb so we can add it
		 * later without re-allocating. */
		txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			skb_frag->priority = UP2AC(skb->priority);
			if (encrypt)
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);

			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control. */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment takes the remaining length. */
				bytes = bytes_last_frag;
			}
			if(ieee->current_network.QoS_Enable) {
				/* Per-AC sequence number (shifted left 4 bits);
				 * the low 4 bits hold the fragment number. */
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			/* Put the SNAP header in the first fragment only;
			 * the remaining payload shrinks accordingly. */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the skb past the data just copied. */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload. */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance (and wrap at 0xFFF) the 12-bit sequence counter
		 * for the access class used (or slot 0 when not QoS). */
		if (ieee->current_network.QoS_Enable) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}

	}else{
		/* Raw TX: the skb already contains an 802.11 frame;
		 * pass it through in a single unencrypted fragment. */
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
	/* The input skb is consumed on every exit path except 'failed'. */
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return NETDEV_TX_OK;
			}
			/* Driver refused the txb: free it and report OK
			 * (the frame is dropped, not requeued). */
			ieee80211_txb_free(txb);
		}
	}

	return NETDEV_TX_OK;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return NETDEV_TX_BUSY;

}
585