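/*
 * libipw TX path: converts Ethernet frames handed down by the network
 * stack into 802.11 data frames (optionally fragmented and
 * host-encrypted) for the ipw2x00 family of wireless drivers.
 */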
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>

#include "libipw.h"
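
/*
 * 802.11 data frames carry no Ethertype field; the payload instead
 * begins with an 802.2 LLC/SNAP header (AA AA 03 <OUI> <ethertype>).
 * Most protocols use the RFC 1042 OUI of 00:00:00, but AppleTalk AARP
 * and IPX keep the 802.1H bridge-tunnel OUI of 00:00:F8 so they can
 * be translated back to Ethernet unambiguously.
 */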
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

static int libipw_copy_snap(u8 *data, __be16 h_proto)
{
	struct libipw_snap_hdr *snap;
	u8 *oui;

	snap = (struct libipw_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));

	return SNAP_SIZE + sizeof(u16);
}
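
/*
 * Encrypt a single 802.11 fragment (MPDU) in place with the current TX
 * key.  The crypto ops insert the IV between the header and the payload
 * and append the ICV, so the fragment skb must already have
 * extra_mpdu_prefix_len headroom and room for the postfix reserved.
 */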
static int libipw_encrypt_fragment(struct libipw_device *ieee,
				   struct sk_buff *frag, int hdr_len)
{
	struct lib80211_crypt_data *crypt =
	    ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	int res;

	if (crypt == NULL)
		return -1;

	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}
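
/* Free a txb and every 802.11 fragment skb it still holds. */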
void libipw_txb_free(struct libipw_txb *txb)
{
	int i;

	if (unlikely(!txb))
		return;
	for (i = 0; i < txb->nr_frags; i++)
		if (txb->fragments[i])
			dev_kfree_skb_any(txb->fragments[i]);
	kfree(txb);
}
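
/*
 * Allocate a txb with nr_frags fragment skbs of txb_size bytes each,
 * reserving headroom in every fragment for the driver (and, later, any
 * per-MPDU encryption prefix).  All-or-nothing: on any allocation
 * failure everything is unwound and NULL is returned.
 */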
static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
					   int headroom, gfp_t gfp_mask)
{
	struct libipw_txb *txb;
	int i;

	txb = kmalloc(sizeof(struct libipw_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct libipw_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
						    gfp_mask);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		skb_reserve(txb->fragments[i], headroom);
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}
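
/*
 * Map the IP precedence bits of an outgoing packet's TOS/DSCP field to
 * an 802.11 QoS traffic identifier (TID 0-7); non-IP traffic falls
 * back to best effort (0).
 */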
static int libipw_classify(struct sk_buff *skb)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
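
/* Incoming skb is converted to a txb which consists of
 * a block of 802.11 fragment packets (stored as skbs) */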
netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct libipw_device *ieee = netdev_priv(dev);
	struct libipw_txb *txb = NULL;
	struct libipw_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
	    rts_required;
	unsigned long flags;
	int encrypt, host_encrypt, host_encrypt_msdu;
	__be16 ether_type;
	int bytes, fc, hdr_len;
	struct sk_buff *skb_frag;
	struct libipw_hdr_3addrqos header = {	/* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	struct lib80211_crypt_data *crypt;
	int priority = skb->priority;
	int snapped = 0;

	if (ieee->is_queue_full && (*ieee->is_queue_full)(dev, priority))
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if (!ieee->hard_start_xmit) {
		printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
		goto success;
	}

	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
		printk(KERN_WARNING "%s: skb too small (%d).\n",
		       ieee->dev->name, skb->len);
		goto success;
	}

	ether_type = ((struct ethhdr *)skb->data)->h_proto;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) &&
	    ieee->sec.encrypt;

	host_encrypt = ieee->host_encrypt && encrypt && crypt;
	host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;

	if (!encrypt && ieee->ieee802_1x &&
	    ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
		dev->stats.tx_dropped++;
		goto success;
	}

	/* Save source and destination addresses */
	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);

	if (host_encrypt)
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
		    IEEE80211_FCTL_PROTECTED;
	else
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

	if (ieee->iw_mode == IW_MODE_INFRA) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(header.addr1, ieee->bssid, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, dest, ETH_ALEN);
	} else if (ieee->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		memcpy(header.addr1, dest, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
	}
	hdr_len = LIBIPW_3ADDR_LEN;

	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
		fc |= IEEE80211_STYPE_QOS_DATA;
		hdr_len += 2;	/* QoS Control field */

		skb->priority = libipw_classify(skb);
		header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID);
	}
	header.frame_ctl = cpu_to_le16(fc);

	/* Advance the SKB to the start of the payload */
	skb_pull(skb, sizeof(struct ethhdr));

	/* Determine total amount of storage required for TXB packets */
	bytes = skb->len + SNAP_SIZE + sizeof(u16);

	/* Encrypt msdu first on the whole data packet. */
	if ((host_encrypt || host_encrypt_msdu) &&
	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
		int res = 0;
		int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		struct sk_buff *skb_new = dev_alloc_skb(len);

		if (unlikely(!skb_new))
			goto failed;

		skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
		skb_put_data(skb_new, &header, hdr_len);
		snapped = 1;
		libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
				 ether_type);
		skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
		res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
		if (res < 0) {
			LIBIPW_ERROR("msdu encryption failed\n");
			dev_kfree_skb_any(skb_new);
			goto failed;
		}
		dev_kfree_skb_any(skb);
		skb = skb_new;
		bytes += crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		skb_pull(skb, hdr_len);
	}
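
	/*
	 * Host-based fragmentation: size the MPDUs so that no fragment
	 * exceeds the fragmentation threshold (multicast and broadcast
	 * frames are never fragmented).  When the hardware fragments
	 * for us, a single full-sized "fragment" is emitted instead.
	 */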
	if (host_encrypt || ieee->host_open_frag) {
		if (is_multicast_ether_addr(dest) ||
		    is_broadcast_ether_addr(dest))
			frag_size = MAX_FRAG_THRESHOLD;
		else
			frag_size = ieee->fts;

		/* Determine amount of payload per fragment.  Regardless of
		 * if this stays open or not, it is important to determine
		 * how much storage space we require for the SKBs. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			bytes_per_frag -= LIBIPW_FCS_LEN;

		/* Each fragment may need to have room for encryption
		 * pre/postfix */
		if (host_encrypt)
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
			    crypt->ops->extra_mpdu_postfix_len;

		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
	} else {
		nr_frags = 1;
		bytes_per_frag = bytes_last_frag = bytes;
		frag_size = bytes + hdr_len;
	}

	rts_required = (frag_size > ieee->rts
			&& ieee->config & CFG_LIBIPW_RTS);
	if (rts_required)
		nr_frags++;

	/* When we allocate the TXB we allocate enough space for the
	 * reserve and full fragment bytes (bytes_per_frag doesn't include
	 * prefix, postfix, header, FCS, etc.) */
	txb = libipw_alloc_txb(nr_frags, frag_size,
			       ieee->tx_headroom, GFP_ATOMIC);
	if (unlikely(!txb)) {
		printk(KERN_WARNING "%s: Could not allocate TXB\n",
		       ieee->dev->name);
		goto failed;
	}
	txb->encrypted = encrypt;
	if (host_encrypt)
		txb->payload_size = frag_size * (nr_frags - 1) +
		    bytes_last_frag;
	else
		txb->payload_size = bytes;

	if (rts_required) {
		skb_frag = txb->fragments[0];
		frag_hdr = skb_put(skb_frag, hdr_len);

		/*
		 * Set header frame_ctl to the RTS.
		 */
		header.frame_ctl =
		    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
		memcpy(frag_hdr, &header, hdr_len);

		/*
		 * Restore header frame_ctl to the original data setting.
		 */
		header.frame_ctl = cpu_to_le16(fc);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);

		txb->rts_included = 1;
		i = 1;
	} else
		i = 0;

	for (; i < nr_frags; i++) {
		skb_frag = txb->fragments[i];

		if (host_encrypt)
			skb_reserve(skb_frag,
				    crypt->ops->extra_mpdu_prefix_len);

		frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

		/* If this is not the last fragment, then add the MOREFRAGS
		 * bit to the frame control */
		if (i != nr_frags - 1) {
			frag_hdr->frame_ctl =
			    cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
			bytes = bytes_per_frag;
		} else {
			/* The last fragment has the remaining length */
			bytes = bytes_last_frag;
		}

		/* Put the SNAP header on the first fragment only */
		if (i == 0 && !snapped) {
			libipw_copy_snap(skb_put
					 (skb_frag, SNAP_SIZE + sizeof(u16)),
					 ether_type);
			bytes -= SNAP_SIZE + sizeof(u16);
		}

		skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);

		/* Advance the SKB... */
		skb_pull(skb, bytes);

		/* Encryption routine will move the header forward in order
		 * to insert the IV between the header and the payload */
		if (host_encrypt)
			libipw_encrypt_fragment(ieee, skb_frag, hdr_len);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);
	}

 success:
	spin_unlock_irqrestore(&ieee->lock, flags);

	dev_kfree_skb_any(skb);

	if (txb) {
		netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority);
		if (ret == NETDEV_TX_OK) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += txb->payload_size;
			return NETDEV_TX_OK;
		}

		libipw_txb_free(txb);
	}

	return NETDEV_TX_OK;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	dev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL(libipw_xmit);
EXPORT_SYMBOL(libipw_txb_free);