#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>

#include "libipw.h"
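
/*
 * 802.11 data frame encapsulation:
 *
 * Ethernet payloads handed to libipw_xmit() are wrapped into 802.11
 * data frames.  Every fragment carries a 3-address (optionally QoS)
 * header, and the first fragment additionally carries an 802.2
 * LLC/SNAP header recording the original Ethernet protocol ID:
 *
 *   | 802.11 header | LLC/SNAP (first fragment only) | payload |
 *
 * With host-based fragmentation the payload is split into MPDUs no
 * larger than the fragmentation threshold, all but the last flagged
 * MOREFRAGS.  With host-based encryption, each MPDU is additionally
 * wrapped with the cipher's per-MPDU prefix (e.g. IV) and postfix
 * (e.g. ICV/MIC).
 */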

static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
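
/*
 * Write an 802.2 LLC/SNAP header at @data and append the original
 * Ethernet protocol ID.  AppleTalk AARP and IPX keep the 802.1H
 * (bridge-tunnel) OUI; all other protocols use the RFC 1042 OUI.
 * Returns the number of bytes written.
 */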
static int libipw_copy_snap(u8 *data, __be16 h_proto)
{
	struct libipw_snap_hdr *snap;
	u8 *oui;

	snap = (struct libipw_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));

	return SNAP_SIZE + sizeof(u16);
}
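
/*
 * Encrypt a single fragment (MPDU) in place with the current TX key.
 * Returns 0 on success and -1 if no crypt context is configured or
 * the cipher's encrypt_mpdu handler rejects the frame.
 */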
static int libipw_encrypt_fragment(struct libipw_device *ieee,
				   struct sk_buff *frag, int hdr_len)
{
	struct lib80211_crypt_data *crypt =
	    ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	int res;

	if (crypt == NULL)
		return -1;
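
	/* Take a reference so the crypt context cannot be freed while
	 * the cipher's encrypt_mpdu handler runs on this fragment. */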
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}

void libipw_txb_free(struct libipw_txb *txb)
{
	int i;
	if (unlikely(!txb))
		return;
	for (i = 0; i < txb->nr_frags; i++)
		if (txb->fragments[i])
			dev_kfree_skb_any(txb->fragments[i]);
	kfree(txb);
}
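
/*
 * Allocate a TXB with @nr_frags skbs, each sized for one fragment of
 * @txb_size bytes plus @headroom bytes reserved for the driver.  If
 * any skb allocation fails, everything already allocated is freed and
 * NULL is returned.
 */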
static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
					   int headroom, gfp_t gfp_mask)
{
	struct libipw_txb *txb;
	int i;
	txb = kmalloc(sizeof(struct libipw_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct libipw_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
						    gfp_mask);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		skb_reserve(txb->fragments[i], headroom);
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}
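
/* Map the IPv4 TOS/DSCP precedence bits to an 802.1d priority (0-7);
 * non-IP frames default to best effort (0). */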
static int libipw_classify(struct sk_buff *skb)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
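
/*
 * libipw_xmit() - encapsulate an Ethernet frame for 802.11 transmission.
 *
 * Builds a TXB containing one or more 802.11 MPDU fragments (plus an
 * optional leading RTS frame), applying host-based MSDU/MPDU encryption
 * and fragmentation as configured, then hands the TXB to the driver's
 * hard_start_xmit handler.
 */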
netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct libipw_device *ieee = netdev_priv(dev);
	struct libipw_txb *txb = NULL;
	struct libipw_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
	    rts_required;
	unsigned long flags;
	int encrypt, host_encrypt, host_encrypt_msdu;
	__be16 ether_type;
	int bytes, fc, hdr_len;
	struct sk_buff *skb_frag;
	struct libipw_hdr_3addrqos header = {
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	struct lib80211_crypt_data *crypt;
	int priority = skb->priority;
	int snapped = 0;

	if (ieee->is_queue_full && (*ieee->is_queue_full)(dev, priority))
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&ieee->lock, flags);
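
	/* If there is no handler to take the TXB, don't bother
	 * creating it; the frame is simply dropped via the success
	 * path below. */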
	if (!ieee->hard_start_xmit) {
		printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
		goto success;
	}

	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
		printk(KERN_WARNING "%s: skb too small (%d).\n",
		       ieee->dev->name, skb->len);
		goto success;
	}

	ether_type = ((struct ethhdr *)skb->data)->h_proto;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
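
	/* EAPOL frames may go out unencrypted during 802.1X
	 * authentication; conversely, when 802.1X is active with
	 * drop_unencrypted set, any other frame that cannot be
	 * encrypted is dropped. */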
	encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) &&
	    ieee->sec.encrypt;

	host_encrypt = ieee->host_encrypt && encrypt && crypt;
	host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;

	if (!encrypt && ieee->ieee802_1x &&
	    ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
		dev->stats.tx_dropped++;
		goto success;
	}

	/* Save source and destination addresses */
	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);

	if (host_encrypt)
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
		    IEEE80211_FCTL_PROTECTED;
	else
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

	if (ieee->iw_mode == IW_MODE_INFRA) {
		fc |= IEEE80211_FCTL_TODS;
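		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */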
		memcpy(header.addr1, ieee->bssid, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, dest, ETH_ALEN);
	} else if (ieee->iw_mode == IW_MODE_ADHOC) {
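		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */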
		memcpy(header.addr1, dest, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
	}
	hdr_len = LIBIPW_3ADDR_LEN;

	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
		fc |= IEEE80211_STYPE_QOS_DATA;
		hdr_len += 2;

		skb->priority = libipw_classify(skb);
		header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID);
	}
	header.frame_ctl = cpu_to_le16(fc);

	/* Advance the SKB to the start of the payload */
	skb_pull(skb, sizeof(struct ethhdr));

	/* Determine total amount of storage required for TXB packets */
	bytes = skb->len + SNAP_SIZE + sizeof(u16);

	/* Host-based MSDU encryption must run over the complete frame
	 * before fragmentation, so rebuild the packet in a fresh skb
	 * with room for the cipher's MSDU prefix and postfix. */
	if ((host_encrypt || host_encrypt_msdu) &&
	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
		int res = 0;
		int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		struct sk_buff *skb_new = dev_alloc_skb(len);

		if (unlikely(!skb_new))
			goto failed;

		skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
		skb_put_data(skb_new, &header, hdr_len);
		snapped = 1;
		libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
				 ether_type);
		skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len),
					  skb->len);
		res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
		if (res < 0) {
			LIBIPW_ERROR("msdu encryption failed\n");
			dev_kfree_skb_any(skb_new);
			goto failed;
		}
		dev_kfree_skb_any(skb);
		skb = skb_new;
		bytes += crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		skb_pull(skb, hdr_len);
	}

	if (host_encrypt || ieee->host_open_frag) {
		/* Determine fragmentation size based on destination
		 * (multicast and broadcast are not fragmented) */
		if (is_multicast_ether_addr(dest) ||
		    is_broadcast_ether_addr(dest))
			frag_size = MAX_FRAG_THRESHOLD;
		else
			frag_size = ieee->fts;

		/* Determine amount of payload per fragment: each
		 * fragment carries an 802.11 header, and room must be
		 * left for the FCS when it is computed or reserved on
		 * the host */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			bytes_per_frag -= LIBIPW_FCS_LEN;

		/* Each fragment may need to have room for encryption
		 * pre/postfix */
		if (host_encrypt)
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
			    crypt->ops->extra_mpdu_postfix_len;

		/* Number of fragments is the total payload divided by
		 * the payload space per fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
	} else {
		nr_frags = 1;
		bytes_per_frag = bytes_last_frag = bytes;
		frag_size = bytes + hdr_len;
	}

	/* Frames longer than the RTS threshold get a leading RTS
	 * control frame as an extra TXB fragment */
	rts_required = (frag_size > ieee->rts
			&& ieee->config & CFG_LIBIPW_RTS);
	if (rts_required)
		nr_frags++;

	/* When we allocate the TXB we allocate enough space for the
	 * reserve and full fragment bytes (bytes_per_frag doesn't
	 * include prefix, postfix, header, FCS, etc.) */
	txb = libipw_alloc_txb(nr_frags, frag_size,
			       ieee->tx_headroom, GFP_ATOMIC);
	if (unlikely(!txb)) {
		printk(KERN_WARNING "%s: Could not allocate TXB\n",
		       ieee->dev->name);
		goto failed;
	}
	txb->encrypted = encrypt;
	if (host_encrypt)
		txb->payload_size = frag_size * (nr_frags - 1) +
		    bytes_last_frag;
	else
		txb->payload_size = bytes;

	if (rts_required) {
		skb_frag = txb->fragments[0];
		frag_hdr = skb_put(skb_frag, hdr_len);

		/* Set header frame_ctl to the RTS control type; the
		 * address fields were already filled in above */
		header.frame_ctl =
		    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
		memcpy(frag_hdr, &header, hdr_len);

		/* Restore header frame_ctl to the original data setting */
		header.frame_ctl = cpu_to_le16(fc);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, LIBIPW_FCS_LEN);

		txb->rts_included = 1;
		i = 1;
	} else
		i = 0;

	for (; i < nr_frags; i++) {
		skb_frag = txb->fragments[i];

		if (host_encrypt)
			skb_reserve(skb_frag,
				    crypt->ops->extra_mpdu_prefix_len);

		frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

		/* If this is not the last fragment, then add the
		 * MOREFRAGS bit to the frame control */
		if (i != nr_frags - 1) {
			frag_hdr->frame_ctl =
			    cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
			bytes = bytes_per_frag;
		} else {
			/* The last fragment takes the remaining length */
			bytes = bytes_last_frag;
		}

		/* The first fragment carries the LLC/SNAP header unless
		 * it was already added during MSDU encryption */
		if (i == 0 && !snapped) {
			libipw_copy_snap(skb_put(skb_frag,
						 SNAP_SIZE + sizeof(u16)),
					 ether_type);
			bytes -= SNAP_SIZE + sizeof(u16);
		}

		skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes),
					  bytes);

		/* Advance the SKB past the data just copied */
		skb_pull(skb, bytes);

		/* The encryption routine will move the header forward
		 * in order to insert the IV between the header and the
		 * payload */
		if (host_encrypt)
			libipw_encrypt_fragment(ieee, skb_frag, hdr_len);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, LIBIPW_FCS_LEN);
	}

success:
	spin_unlock_irqrestore(&ieee->lock, flags);

	dev_kfree_skb_any(skb);

	if (txb) {
		netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority);
		if (ret == NETDEV_TX_OK) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += txb->payload_size;
			return NETDEV_TX_OK;
		}

		libipw_txb_free(txb);
	}

	return NETDEV_TX_OK;

failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	dev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL(libipw_xmit);

EXPORT_SYMBOL(libipw_txb_free);