1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75#include <linux/if_arp.h>
76#include <linux/slab.h>
77#include <linux/netdevice.h>
78#include <linux/ethtool.h>
79#include "i2400m.h"
80
81
82#define D_SUBMODULE netdev
83#include "debug-levels.h"
84
enum {
	/*
	 * Watchdog timeout for a stuck TX queue.
	 *
	 * Generous on purpose: the device may take many seconds to come
	 * out of IDLE before a pending packet can actually be sent (see
	 * i2400m_wake_tx_work()), so a short timeout would fire spuriously.
	 */
	I2400M_TX_TIMEOUT = 21 * HZ,
	/*
	 * Driver-side TX queue length handed to the network stack
	 * (net_dev->tx_queue_len in i2400m_netdev_setup()).
	 */
	I2400M_TX_QLEN = 20,
};
97
98
99static
100int i2400m_open(struct net_device *net_dev)
101{
102 int result;
103 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
104 struct device *dev = i2400m_dev(i2400m);
105
106 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
107
108 mutex_lock(&i2400m->init_mutex);
109 if (i2400m->updown)
110 result = 0;
111 else
112 result = -EBUSY;
113 mutex_unlock(&i2400m->init_mutex);
114 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
115 net_dev, i2400m, result);
116 return result;
117}
118
119
/*
 * i2400m_stop - netdev stop callback (interface brought down)
 *
 * Flushes/cancels the pending wake-and-TX work and releases the skb it
 * may still be holding (see i2400m_net_wake_stop()), so no deferred
 * work touches the interface once it is down.  Always succeeds.
 */
static
int i2400m_stop(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	i2400m_net_wake_stop(i2400m);
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
	return 0;
}
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152void i2400m_wake_tx_work(struct work_struct *ws)
153{
154 int result;
155 struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
156 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
157 struct device *dev = i2400m_dev(i2400m);
158 struct sk_buff *skb = i2400m->wake_tx_skb;
159 unsigned long flags;
160
161 spin_lock_irqsave(&i2400m->tx_lock, flags);
162 skb = i2400m->wake_tx_skb;
163 i2400m->wake_tx_skb = NULL;
164 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
165
166 d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
167 result = -EINVAL;
168 if (skb == NULL) {
169 dev_err(dev, "WAKE&TX: skb disappeared!\n");
170 goto out_put;
171 }
172
173
174
175 if (unlikely(!netif_carrier_ok(net_dev)))
176 goto out_kfree;
177 result = i2400m_cmd_exit_idle(i2400m);
178 if (result == -EILSEQ)
179 result = 0;
180 if (result < 0) {
181 dev_err(dev, "WAKE&TX: device didn't get out of idle: "
182 "%d - resetting\n", result);
183 i2400m_reset(i2400m, I2400M_RT_BUS);
184 goto error;
185 }
186 result = wait_event_timeout(i2400m->state_wq,
187 i2400m->state != I2400M_SS_IDLE,
188 net_dev->watchdog_timeo - HZ/2);
189 if (result == 0)
190 result = -ETIMEDOUT;
191 if (result < 0) {
192 dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
193 "%d - resetting\n", result);
194 i2400m_reset(i2400m, I2400M_RT_BUS);
195 goto error;
196 }
197 msleep(20);
198 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
199error:
200 netif_wake_queue(net_dev);
201out_kfree:
202 kfree_skb(skb);
203out_put:
204 i2400m_put(i2400m);
205 d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
206 ws, i2400m, skb, result);
207}
208
209
210
211
212
213
214
215
216
217
218static
219void i2400m_tx_prep_header(struct sk_buff *skb)
220{
221 struct i2400m_pl_data_hdr *pl_hdr;
222 skb_pull(skb, ETH_HLEN);
223 pl_hdr = (struct i2400m_pl_data_hdr *) skb_push(skb, sizeof(*pl_hdr));
224 pl_hdr->reserved = 0;
225}
226
227
228
229
230
231
232
233
234
235void i2400m_net_wake_stop(struct i2400m *i2400m)
236{
237 struct device *dev = i2400m_dev(i2400m);
238
239 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
240
241
242
243
244 if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
245 && i2400m->wake_tx_skb != NULL) {
246 unsigned long flags;
247 struct sk_buff *wake_tx_skb;
248 spin_lock_irqsave(&i2400m->tx_lock, flags);
249 wake_tx_skb = i2400m->wake_tx_skb;
250 i2400m->wake_tx_skb = NULL;
251 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
252 i2400m_put(i2400m);
253 kfree_skb(wake_tx_skb);
254 }
255 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
256}
257
258
259
260
261
262
263
264
265
266
267
268
269
/*
 * i2400m_net_wake_tx - queue a packet for TX once the device exits IDLE
 *
 * Called from i2400m_hard_start_xmit() when the device is in IDLE mode
 * and cannot TX directly.  Stashes a reference to the skb and schedules
 * i2400m_wake_tx_work() to wake the device and send it.  The TX queue
 * is stopped until the work function re-enables it.
 *
 * Takes one i2400m reference and one skb reference; both are released
 * by i2400m_wake_tx_work() (or i2400m_net_wake_stop()).
 *
 * Returns: >= 0 if the work was queued, -EBUSY if a wake-TX is already
 * pending (the caller's packet is then dropped).
 */
static
int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
		       struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
	if (net_ratelimit()) {
		d_printf(3, dev, "WAKE&NETTX: "
			 "skb %p sending %d bytes to radio\n",
			 skb, skb->len);
		d_dump(4, dev, skb->data, skb->len);
	}
	/* Only one wake-TX may be in flight at a time; if the work is
	 * already pending, this packet is dropped (result stays 0 and is
	 * mapped to -EBUSY below). */
	result = 0;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (!work_pending(&i2400m->wake_tx_ws)) {
		netif_stop_queue(net_dev);
		i2400m_get(i2400m);		/* dropped by the work fn */
		i2400m->wake_tx_skb = skb_get(skb);	/* ditto */
		i2400m_tx_prep_header(skb);
		result = schedule_work(&i2400m->wake_tx_ws);
		/* !work_pending() just held, so scheduling must succeed */
		WARN_ON(result == 0);
	}
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (result == 0) {
		/* A wake-TX was already pending: the device is on its way
		 * out of idle; drop this packet, the stack will retry. */
		if (net_ratelimit())
			d_printf(1, dev, "NETTX: device exiting idle, "
				 "dropping skb %p, queue running %d\n",
				 skb, netif_queue_stopped(net_dev));
		result = -EBUSY;
	}
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return result;
}
313
314
315
316
317
318
319
320
321
322
/*
 * i2400m_net_tx - send a packet straight to the radio (device awake)
 *
 * Replaces the fake ethernet header with the device payload header and
 * hands the buffer to the TX FIFO (i2400m_tx()).  The caller still owns
 * and frees the skb.
 *
 * Returns: result of i2400m_tx() (0 on success, negative errno).
 */
static
int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
		  struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
		  i2400m, net_dev, skb);
	/* Refresh the watchdog timestamp so the TX timeout doesn't fire */
	net_dev->trans_start = jiffies;
	i2400m_tx_prep_header(skb);
	d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
		 skb, skb->len);
	d_dump(4, dev, skb->data, skb->len);
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
	d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
		i2400m, net_dev, skb, result);
	return result;
}
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363static
364netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
365 struct net_device *net_dev)
366{
367 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
368 struct device *dev = i2400m_dev(i2400m);
369 int result;
370
371 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
372 if (skb_header_cloned(skb)) {
373
374
375
376
377
378
379 result = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
380 if (result) {
381 result = NETDEV_TX_BUSY;
382 goto error_expand;
383 }
384 }
385
386 if (i2400m->state == I2400M_SS_IDLE)
387 result = i2400m_net_wake_tx(i2400m, net_dev, skb);
388 else
389 result = i2400m_net_tx(i2400m, net_dev, skb);
390 if (result < 0)
391 net_dev->stats.tx_dropped++;
392 else {
393 net_dev->stats.tx_packets++;
394 net_dev->stats.tx_bytes += skb->len;
395 }
396 result = NETDEV_TX_OK;
397error_expand:
398 kfree_skb(skb);
399 d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
400 return result;
401}
402
403
404static
405int i2400m_change_mtu(struct net_device *net_dev, int new_mtu)
406{
407 int result;
408 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
409 struct device *dev = i2400m_dev(i2400m);
410
411 if (new_mtu >= I2400M_MAX_MTU) {
412 dev_err(dev, "Cannot change MTU to %d (max is %d)\n",
413 new_mtu, I2400M_MAX_MTU);
414 result = -EINVAL;
415 } else {
416 net_dev->mtu = new_mtu;
417 result = 0;
418 }
419 return result;
420}
421
422
/*
 * i2400m_tx_timeout - ndo_tx_timeout: the watchdog fired
 *
 * Deliberately only bumps the error counter: the device can legitimately
 * stall for a long while coming out of IDLE (see I2400M_TX_TIMEOUT), so
 * no reset/recovery is attempted here.
 */
static
void i2400m_tx_timeout(struct net_device *net_dev)
{
	net_dev->stats.tx_errors++;
}
435
436
437
438
439
440
441
442
443
444static
445void i2400m_rx_fake_eth_header(struct net_device *net_dev,
446 void *_eth_hdr, __be16 protocol)
447{
448 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
449 struct ethhdr *eth_hdr = _eth_hdr;
450
451 memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
452 memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
453 sizeof(eth_hdr->h_source));
454 eth_hdr->h_proto = protocol;
455}
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
/*
 * i2400m_net_rx - deliver one received payload to the network stack
 *
 * @skb_rx: the skb that holds the whole received message
 * @i: payload index inside the message; payload 0 can take ownership of
 *     @skb_rx itself, later payloads must copy (the first one reuses the
 *     original skb, trimmed to this payload)
 * @buf/@buf_len: the payload inside @skb_rx
 *
 * A fake ethernet header is prepended (the device hands us raw IP) and
 * the packet is injected with netif_rx_ni().  On allocation failure the
 * packet is counted as dropped and silently discarded.
 */
void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   unsigned i, const void *buf, int buf_len)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;

	d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
		  i2400m, buf, buf_len);
	if (i) {
		/* Not the first payload: reuse skb_rx (extra ref) and cut
		 * it down so that exactly [buf, buf+len) remains. */
		skb = skb_get(skb_rx);
		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
		skb_pull(skb, buf - (void *) skb->data);
		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
	} else {
		/* First payload: copy into a fresh skb.
		 * NOTE(review): GFP_KERNEL implies this path runs in
		 * process context -- confirm against the RX caller. */
		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "NETRX: no memory to realloc skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_realloc;
		}
		memcpy(skb_put(skb, buf_len), buf, buf_len);
	}
	/* Fake header lives in the headroom right before the payload */
	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
				  skb->data - ETH_HLEN,
				  cpu_to_be16(ETH_P_IP));
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = i2400m->wimax_dev.net_dev;
	skb->protocol = htons(ETH_P_IP);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += buf_len;
	d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
		 buf_len);
	d_dump(4, dev, buf, buf_len);
	netif_rx_ni(skb);
error_skb_realloc:
	d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
		i2400m, buf, buf_len);
}
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
/*
 * i2400m_net_erx - deliver an extended-RX packet to the network stack
 *
 * @skb: already sized to the packet; headroom holds the fake header
 * @cs: coding scheme / packet type reported by the device
 *
 * Only IPv4 payloads are supported; anything else is logged and the skb
 * leaks intentionally to the caller's error path (we just return).
 */
void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
		    enum i2400m_cs cs)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	int protocol;

	d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
		  i2400m, skb, skb->len, cs);
	switch(cs) {
	case I2400M_CS_IPV4_0:
	case I2400M_CS_IPV4:	/* both variants carry plain IPv4 */
		protocol = ETH_P_IP;
		i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
					  skb->data - ETH_HLEN,
					  cpu_to_be16(ETH_P_IP));
		skb_set_mac_header(skb, -ETH_HLEN);
		skb->dev = i2400m->wimax_dev.net_dev;
		skb->protocol = htons(ETH_P_IP);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += skb->len;
		break;
	default:
		dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
		goto error;

	}
	d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
		 skb->len);
	d_dump(4, dev, skb->data, skb->len);
	netif_rx_ni(skb);
error:
	d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n",
		i2400m, skb, skb->len, cs);
}
594
/* Netdev callbacks: no ndo_get_stats (default net_dev->stats is used) */
static const struct net_device_ops i2400m_netdev_ops = {
	.ndo_open = i2400m_open,
	.ndo_stop = i2400m_stop,
	.ndo_start_xmit = i2400m_hard_start_xmit,
	.ndo_tx_timeout = i2400m_tx_timeout,
	.ndo_change_mtu = i2400m_change_mtu,
};
602
603static void i2400m_get_drvinfo(struct net_device *net_dev,
604 struct ethtool_drvinfo *info)
605{
606 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
607
608 strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
609 strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
610 if (net_dev->dev.parent)
611 strncpy(info->bus_info, dev_name(net_dev->dev.parent),
612 sizeof(info->bus_info) - 1);
613}
614
/* Ethtool callbacks; link state comes from the generic carrier flag */
static const struct ethtool_ops i2400m_ethtool_ops = {
	.get_drvinfo = i2400m_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
619
620
621
622
623
624
/*
 * i2400m_netdev_setup - configure a freshly allocated net_device
 *
 * ether_setup() must run first: it installs ethernet defaults that the
 * assignments below then override (MTU, queue length, flags, ops).
 */
void i2400m_netdev_setup(struct net_device *net_dev)
{
	d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
	ether_setup(net_dev);
	net_dev->mtu = I2400M_MAX_MTU;
	net_dev->tx_queue_len = I2400M_TX_QLEN;
	/* no VLAN support; buffers may live in high memory */
	net_dev->features =
		NETIF_F_VLAN_CHALLENGED
		| NETIF_F_HIGHDMA;
	/* point-to-point IP pipe: no ARP, no broadcast, no multicast
	 * (the masked expression reduces to just IFF_NOARP) */
	net_dev->flags =
		IFF_NOARP
		& (~IFF_BROADCAST
		   & ~IFF_MULTICAST);
	net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
	net_dev->netdev_ops = &i2400m_netdev_ops;
	net_dev->ethtool_ops = &i2400m_ethtool_ops;
	d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
}
644
645