#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>

#include "libipw.h"

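/*
 * libipw TX path: Ethernet II frames handed to libipw_xmit() are turned
 * into one or more 802.11 data MPDUs (collected in a struct libipw_txb)
 * with LLC/SNAP encapsulation, optional fragmentation and optional
 * host-based encryption, and are then passed to the driver's
 * hard_start_xmit handler.
 *
 * SNAP encapsulation: AppleTalk ARP and IPX use the 802.1H
 * (Bridge-Tunnel) OUI; every other ethertype uses the RFC 1042 OUI.
 */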
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

static int libipw_copy_snap(u8 *data, __be16 h_proto)
{
        struct libipw_snap_hdr *snap;
        u8 *oui;

        snap = (struct libipw_snap_hdr *)data;
        snap->dsap = 0xaa;
        snap->ssap = 0xaa;
        snap->ctrl = 0x03;

        if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
                oui = P802_1H_OUI;
        else
                oui = RFC1042_OUI;
        snap->oui[0] = oui[0];
        snap->oui[1] = oui[1];
        snap->oui[2] = oui[2];

        memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));

        return SNAP_SIZE + sizeof(u16);
}

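/*
 * Encrypt a single MPDU in place using the current lib80211 TX key.
 * The refcount keeps the crypt context alive while the cipher runs;
 * on failure the frame is counted as a TX discard.
 */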
static int libipw_encrypt_fragment(struct libipw_device *ieee,
                                   struct sk_buff *frag, int hdr_len)
{
        struct lib80211_crypt_data *crypt =
            ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
        int res;

        if (crypt == NULL)
                return -1;

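        /* Typical encrypted frame format (WEP): IV (4 bytes), clear
         * payload (including SNAP), ICV (4 bytes) */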
        atomic_inc(&crypt->refcnt);
        res = 0;
        if (crypt->ops && crypt->ops->encrypt_mpdu)
                res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

        atomic_dec(&crypt->refcnt);
        if (res < 0) {
                printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
                       ieee->dev->name, frag->len);
                ieee->ieee_stats.tx_discards++;
                return -1;
        }

        return 0;
}

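/* Free a txb and any fragment skbs it still holds. */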
void libipw_txb_free(struct libipw_txb *txb)
{
        int i;
        if (unlikely(!txb))
                return;
        for (i = 0; i < txb->nr_frags; i++)
                if (txb->fragments[i])
                        dev_kfree_skb_any(txb->fragments[i]);
        kfree(txb);
}

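/*
 * Allocate a txb plus nr_frags fragment skbs, each with room for one
 * MPDU of txb_size bytes plus driver headroom.  All-or-nothing: if any
 * skb allocation fails, everything allocated so far is freed.
 */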
static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
                                           int headroom, gfp_t gfp_mask)
{
        struct libipw_txb *txb;
        int i;
        txb = kmalloc(sizeof(struct libipw_txb) + (sizeof(u8 *) * nr_frags),
                      gfp_mask);
        if (!txb)
                return NULL;

        memset(txb, 0, sizeof(struct libipw_txb));
        txb->nr_frags = nr_frags;
        txb->frag_size = txb_size;

        for (i = 0; i < nr_frags; i++) {
                txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
                                                    gfp_mask);
                if (unlikely(!txb->fragments[i])) {
                        i--;
                        break;
                }
                skb_reserve(txb->fragments[i], headroom);
        }
        if (unlikely(i != nr_frags)) {
                while (i >= 0)
                        dev_kfree_skb_any(txb->fragments[i--]);
                kfree(txb);
                return NULL;
        }
        return txb;
}

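/*
 * Classify an outgoing IPv4 frame into an 802.11e TID (0-7) based on
 * the class-selector bits of the TOS byte; non-IP traffic and any
 * unrecognized codepoint fall back to best effort (0).
 */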
static int libipw_classify(struct sk_buff *skb)
{
        struct ethhdr *eth;
        struct iphdr *ip;

        eth = (struct ethhdr *)skb->data;
        if (eth->h_proto != htons(ETH_P_IP))
                return 0;

        ip = ip_hdr(skb);
        switch (ip->tos & 0xfc) {
        case 0x20:
                return 2;
        case 0x40:
                return 1;
        case 0x60:
                return 3;
        case 0x80:
                return 4;
        case 0xa0:
                return 5;
        case 0xc0:
                return 6;
        case 0xe0:
                return 7;
        default:
                return 0;
        }
}
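/*
 * Incoming skb is converted to a txb which consists of a block of
 * 802.11 fragment packets (stored as skbs).
 */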
netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct libipw_device *ieee = netdev_priv(dev);
        struct libipw_txb *txb = NULL;
        struct libipw_hdr_3addrqos *frag_hdr;
        int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
            rts_required;
        unsigned long flags;
        int encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
        __be16 ether_type;
        int bytes, fc, hdr_len;
        struct sk_buff *skb_frag;
        struct libipw_hdr_3addrqos header = {
                .duration_id = 0,
                .seq_ctl = 0,
                .qos_ctl = 0
        };
        u8 dest[ETH_ALEN], src[ETH_ALEN];
        struct lib80211_crypt_data *crypt;
        int priority = skb->priority;
        int snapped = 0;

        if (ieee->is_queue_full && (*ieee->is_queue_full)(dev, priority))
                return NETDEV_TX_BUSY;

        spin_lock_irqsave(&ieee->lock, flags);

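        /* If there is no driver handler to take the TXB, there is no
         * point in building one; drop the frame. */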
        if (!ieee->hard_start_xmit) {
                printk(KERN_WARNING "%s: No xmit handler.\n",
                       ieee->dev->name);
                goto success;
        }

        if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
                printk(KERN_WARNING "%s: skb too small (%d).\n",
                       ieee->dev->name, skb->len);
                goto success;
        }

        ether_type = ((struct ethhdr *)skb->data)->h_proto;

        crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

        encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) &&
            ieee->sec.encrypt;

        host_encrypt = ieee->host_encrypt && encrypt && crypt;
        host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
        host_build_iv = ieee->host_build_iv && encrypt && crypt;

        if (!encrypt && ieee->ieee802_1x &&
            ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
                dev->stats.tx_dropped++;
                goto success;
        }

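        /* Save source and destination addresses */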
        skb_copy_from_linear_data(skb, dest, ETH_ALEN);
        skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);

        if (host_encrypt || host_build_iv)
                fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
                    IEEE80211_FCTL_PROTECTED;
        else
                fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

        if (ieee->iw_mode == IW_MODE_INFRA) {
                fc |= IEEE80211_FCTL_TODS;
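                /* To DS: Addr1 = BSSID, Addr2 = SRC, Addr3 = DEST */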
                memcpy(header.addr1, ieee->bssid, ETH_ALEN);
                memcpy(header.addr2, src, ETH_ALEN);
                memcpy(header.addr3, dest, ETH_ALEN);
        } else if (ieee->iw_mode == IW_MODE_ADHOC) {
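                /* not From/To DS: Addr1 = DEST, Addr2 = SRC,
                 * Addr3 = BSSID */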
                memcpy(header.addr1, dest, ETH_ALEN);
                memcpy(header.addr2, src, ETH_ALEN);
                memcpy(header.addr3, ieee->bssid, ETH_ALEN);
        }
        hdr_len = LIBIPW_3ADDR_LEN;

        if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
                fc |= IEEE80211_STYPE_QOS_DATA;
                hdr_len += 2;

                skb->priority = libipw_classify(skb);
                header.qos_ctl |= cpu_to_le16(skb->priority &
                                              LIBIPW_QCTL_TID);
        }
        header.frame_ctl = cpu_to_le16(fc);

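        /* Advance the SKB to the start of the payload */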
        skb_pull(skb, sizeof(struct ethhdr));

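        /* Determine total amount of storage required for TXB packets */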
        bytes = skb->len + SNAP_SIZE + sizeof(u16);

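        /* MSDU-level encryption (e.g. TKIP's Michael MIC) must cover the
         * whole packet, so it runs before any fragmentation */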
        if ((host_encrypt || host_encrypt_msdu) &&
            crypt && crypt->ops && crypt->ops->encrypt_msdu) {
                int res = 0;
                int len = bytes + hdr_len +
                    crypt->ops->extra_msdu_prefix_len +
                    crypt->ops->extra_msdu_postfix_len;
                struct sk_buff *skb_new = dev_alloc_skb(len);

                if (unlikely(!skb_new))
                        goto failed;

                skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
                memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
                snapped = 1;
                libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
                                 ether_type);
                skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len),
                                          skb->len);
                res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
                if (res < 0) {
                        LIBIPW_ERROR("msdu encryption failed\n");
                        dev_kfree_skb_any(skb_new);
                        goto failed;
                }
                dev_kfree_skb_any(skb);
                skb = skb_new;
                bytes += crypt->ops->extra_msdu_prefix_len +
                    crypt->ops->extra_msdu_postfix_len;
                skb_pull(skb, hdr_len);
        }

        if (host_encrypt || ieee->host_open_frag) {
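                /* Determine fragmentation size based on destination
                 * (multicast and broadcast are not fragmented) */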
                if (is_multicast_ether_addr(dest) ||
                    is_broadcast_ether_addr(dest))
                        frag_size = MAX_FRAG_THRESHOLD;
                else
                        frag_size = ieee->fts;

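                /* Determine amount of payload per fragment: MPDU size less
                 * the 802.11 header, and less the FCS if the hardware
                 * computes or reserves one */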
                bytes_per_frag = frag_size - hdr_len;
                if (ieee->config &
                    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
                        bytes_per_frag -= LIBIPW_FCS_LEN;

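                /* Each fragment may need to have room for encryption
                 * pre/postfix */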
                if (host_encrypt)
                        bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
                            crypt->ops->extra_mpdu_postfix_len;

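                /* Number of fragments is the total payload divided by the
                 * per-fragment capacity, rounding up for a short tail */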
                nr_frags = bytes / bytes_per_frag;
                bytes_last_frag = bytes % bytes_per_frag;
                if (bytes_last_frag)
                        nr_frags++;
                else
                        bytes_last_frag = bytes_per_frag;
        } else {
                nr_frags = 1;
                bytes_per_frag = bytes_last_frag = bytes;
                frag_size = bytes + hdr_len;
        }

        rts_required = (frag_size > ieee->rts
                        && ieee->config & CFG_LIBIPW_RTS);
        if (rts_required)
                nr_frags++;

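        /* When we allocate the TXB we allocate enough space for the
         * reserve and full fragment bytes (bytes_per_frag doesn't include
         * prefix, postfix, header, FCS, etc.) */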
        txb = libipw_alloc_txb(nr_frags, frag_size,
                               ieee->tx_headroom, GFP_ATOMIC);
        if (unlikely(!txb)) {
                printk(KERN_WARNING "%s: Could not allocate TXB\n",
                       ieee->dev->name);
                goto failed;
        }
        txb->encrypted = encrypt;
        if (host_encrypt)
                txb->payload_size = frag_size * (nr_frags - 1) +
                    bytes_last_frag;
        else
                txb->payload_size = bytes;

        if (rts_required) {
                skb_frag = txb->fragments[0];
                frag_hdr =
                    (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len);

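                /* Set header frame_ctl to the RTS. */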
                header.frame_ctl =
                    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
                memcpy(frag_hdr, &header, hdr_len);

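                /* Restore header frame_ctl to the original data setting. */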
                header.frame_ctl = cpu_to_le16(fc);

                if (ieee->config &
                    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
                        skb_put(skb_frag, 4);

                txb->rts_included = 1;
                i = 1;
        } else
                i = 0;

        for (; i < nr_frags; i++) {
                skb_frag = txb->fragments[i];

                if (host_encrypt || host_build_iv)
                        skb_reserve(skb_frag,
                                    crypt->ops->extra_mpdu_prefix_len);

                frag_hdr =
                    (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
                memcpy(frag_hdr, &header, hdr_len);

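                /* If this is not the last fragment, then add the MOREFRAGS
                 * bit to the frame control */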
                if (i != nr_frags - 1) {
                        frag_hdr->frame_ctl =
                            cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
                        bytes = bytes_per_frag;
                } else {
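                        /* The last fragment takes the remaining length */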
                        bytes = bytes_last_frag;
                }

                if (i == 0 && !snapped) {
                        libipw_copy_snap(skb_put(skb_frag,
                                                 SNAP_SIZE + sizeof(u16)),
                                         ether_type);
                        bytes -= SNAP_SIZE + sizeof(u16);
                }

                skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes),
                                          bytes);

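                /* Advance the SKB past the data just copied */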
                skb_pull(skb, bytes);

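                /* Encryption routine will move the header forward in order
                 * to insert the IV between the header and the payload */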
                if (host_encrypt)
                        libipw_encrypt_fragment(ieee, skb_frag, hdr_len);
                else if (host_build_iv) {
                        atomic_inc(&crypt->refcnt);
                        if (crypt->ops->build_iv)
                                crypt->ops->build_iv(skb_frag, hdr_len,
                                    ieee->sec.keys[ieee->sec.active_key],
                                    ieee->sec.key_sizes[ieee->sec.active_key],
                                    crypt->priv);
                        atomic_dec(&crypt->refcnt);
                }

                if (ieee->config &
                    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
                        skb_put(skb_frag, 4);
        }

success:
        spin_unlock_irqrestore(&ieee->lock, flags);

        dev_kfree_skb_any(skb);

        if (txb) {
                netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev,
                                                           priority);
                if (ret == NETDEV_TX_OK) {
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += txb->payload_size;
                        return NETDEV_TX_OK;
                }

                libipw_txb_free(txb);
        }

        return NETDEV_TX_OK;

failed:
        spin_unlock_irqrestore(&ieee->lock, flags);
        netif_stop_queue(dev);
        dev->stats.tx_errors++;
        return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL(libipw_xmit);

EXPORT_SYMBOL(libipw_txb_free);