1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75#include <linux/if_arp.h>
76#include <linux/slab.h>
77#include <linux/netdevice.h>
78#include <linux/ethtool.h>
79#include <linux/export.h>
80#include "i2400m.h"
81
82
83#define D_SUBMODULE netdev
84#include "debug-levels.h"
85
enum {
	/*
	 * Time (in jiffies) the networking stack will wait for the TX
	 * queue to make progress before invoking ndo_tx_timeout; fed
	 * to net_dev->watchdog_timeo in i2400m_netdev_setup().  Also
	 * used (minus HZ/2) as the wait budget for exiting IDLE in
	 * i2400m_wake_tx_work().
	 */
	I2400M_TX_TIMEOUT = 21 * HZ,
	/*
	 * Device TX queue length; assigned to net_dev->tx_queue_len
	 * in i2400m_netdev_setup().
	 */
	I2400M_TX_QLEN = 20,
};
98
99
100static
101int i2400m_open(struct net_device *net_dev)
102{
103 int result;
104 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
105 struct device *dev = i2400m_dev(i2400m);
106
107 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
108
109 mutex_lock(&i2400m->init_mutex);
110 if (i2400m->updown)
111 result = 0;
112 else
113 result = -EBUSY;
114 mutex_unlock(&i2400m->init_mutex);
115 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
116 net_dev, i2400m, result);
117 return result;
118}
119
120
/*
 * ndo_stop: bring the network interface down.
 *
 * Cancels any pending wake-then-transmit work and releases the
 * deferred skb, if any (i2400m_net_wake_stop()).  Always returns 0.
 */
static
int i2400m_stop(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	i2400m_net_wake_stop(i2400m);
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
	return 0;
}
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
/*
 * Deferred work: wake the device from IDLE and transmit the skb that
 * i2400m_net_wake_tx() parked in i2400m->wake_tx_skb.
 *
 * Consumes the skb (freed on every path where it exists) and drops
 * the device reference that was taken when the work was scheduled
 * (i2400m_put() at out_put).  If the device cannot be brought out of
 * IDLE, it is reset over the bus.  The TX queue, stopped when this
 * work was scheduled, is restarted on the paths that reach 'error:'
 * (including the successful one).
 */
void i2400m_wake_tx_work(struct work_struct *ws)
{
	int result;
	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;
	unsigned long flags;

	/* Steal the parked skb under tx_lock so i2400m_net_wake_stop()
	 * and new wake-TX requests see a consistent NULL. */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
	result = -EINVAL;
	if (skb == NULL) {
		/* Someone (e.g. i2400m_net_wake_stop()) already took it;
		 * nothing to send, just drop our device reference. */
		dev_err(dev, "WAKE&TX: skb disappeared!\n");
		goto out_put;
	}

	/* No carrier: don't bother waking the device, just drop the skb
	 * (note this path does not restart the TX queue). */
	if (unlikely(!netif_carrier_ok(net_dev)))
		goto out_kfree;
	result = i2400m_cmd_exit_idle(i2400m);
	if (result == -EILSEQ)
		/* -EILSEQ is treated as success here — presumably the
		 * device was not in IDLE after all; TODO confirm. */
		result = 0;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: device didn't get out of idle: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	/* Wait (bounded below the netdev watchdog timeout) for the state
	 * machine to actually leave IDLE. */
	result = wait_event_timeout(i2400m->state_wq,
				    i2400m->state != I2400M_SS_IDLE,
				    net_dev->watchdog_timeo - HZ/2);
	if (result == 0)
		result = -ETIMEDOUT;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	/* Brief settle delay after leaving IDLE before transmitting —
	 * NOTE(review): 20ms looks empirical; confirm against hw docs. */
	msleep(20);
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
error:
	netif_wake_queue(net_dev);
out_kfree:
	kfree_skb(skb);
out_put:
	i2400m_put(i2400m);
	d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
		ws, i2400m, skb, result);
}
209
210
211
212
213
214
215
216
217
218
219static
220void i2400m_tx_prep_header(struct sk_buff *skb)
221{
222 struct i2400m_pl_data_hdr *pl_hdr;
223 skb_pull(skb, ETH_HLEN);
224 pl_hdr = (struct i2400m_pl_data_hdr *) skb_push(skb, sizeof(*pl_hdr));
225 pl_hdr->reserved = 0;
226}
227
228
229
230
231
232
233
234
235
/*
 * Cancel any pending wake-then-TX work and release its parked skb.
 *
 * Order matters: cancel_work_sync() first, so that once it returns
 * i2400m_wake_tx_work() is guaranteed not to be running and cannot
 * race us for wake_tx_skb.  Then steal the skb under tx_lock; if it
 * was still there, the work never consumed it, so we must drop both
 * the device reference and the skb reference that were taken when the
 * work was scheduled (see i2400m_net_wake_tx()).
 */
void i2400m_net_wake_stop(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *wake_tx_skb;
	unsigned long flags;

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);

	/* Make sure the work is not running (and won't run) before we
	 * touch wake_tx_skb. */
	cancel_work_sync(&i2400m->wake_tx_ws);

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	wake_tx_skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	if (wake_tx_skb) {
		/* Balance the i2400m_get()/skb_get() done at schedule time */
		i2400m_put(i2400m);
		kfree_skb(wake_tx_skb);
	}

	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
261
262
263
264
265
266
267
268
269
270
271
272
273
/*
 * TX path used while the device is in IDLE state: park the skb and
 * defer the actual transmission to i2400m_wake_tx_work(), which will
 * first wake the device.
 *
 * Only one skb can be parked at a time.  If the slot is free we stop
 * the TX queue, take a device reference and an skb reference (both
 * released by the work, or by i2400m_net_wake_stop()), prep the
 * payload header in place and schedule the work.  If the slot is
 * already taken (a wake is in flight), the packet is dropped and
 * -EBUSY returned.
 *
 * Returns: 0 if the work was scheduled, -EBUSY if the skb was dropped.
 */
static
int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
		       struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
	if (net_ratelimit()) {
		d_printf(3, dev, "WAKE&NETTX: "
			 "skb %p sending %d bytes to radio\n",
			 skb, skb->len);
		d_dump(4, dev, skb->data, skb->len);
	}

	/* result stays 0 if the wake_tx_skb slot is occupied, or if
	 * schedule_work() found the work already queued (WARN_ON). */
	result = 0;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (!i2400m->wake_tx_skb) {
		netif_stop_queue(net_dev);
		i2400m_get(i2400m);
		/* skb_get(): the parked pointer holds its own reference;
		 * the header prep below acts on the same shared data. */
		i2400m->wake_tx_skb = skb_get(skb);
		i2400m_tx_prep_header(skb);
		result = schedule_work(&i2400m->wake_tx_ws);
		WARN_ON(result == 0);
	}
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (result == 0) {
		/* A wake-up is already pending; this packet is dropped
		 * (caller accounts it via tx_dropped). */
		if (net_ratelimit())
			d_printf(1, dev, "NETTX: device exiting idle, "
				 "dropping skb %p, queue running %d\n",
				 skb, netif_queue_stopped(net_dev));
		result = -EBUSY;
	}
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return result;
}
317
318
319
320
321
322
323
324
325
326
/*
 * TX path used while the device is awake: rewrite the header in place
 * and hand the payload straight to the device TX FIFO (i2400m_tx()).
 *
 * The skb itself is not consumed here; the caller
 * (i2400m_hard_start_xmit()) frees it, since i2400m_tx() works from
 * skb->data/skb->len.
 *
 * Returns: result of i2400m_tx() (0 on success, negative errno).
 */
static
int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
		  struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
		  i2400m, net_dev, skb);
	/* Refresh the watchdog timestamp before queueing */
	net_dev->trans_start = jiffies;
	i2400m_tx_prep_header(skb);
	d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
		 skb, skb->len);
	d_dump(4, dev, skb->data, skb->len);
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
	d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
		i2400m, net_dev, skb, result);
	return result;
}
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
/*
 * ndo_start_xmit: dispatch a packet to the idle-wake path or the
 * direct TX path depending on device state.
 *
 * Always consumes the skb and always returns NETDEV_TX_OK; failures
 * are only reflected in the tx_dropped counter.  skb_cow_head() is
 * called first because i2400m_tx_prep_header() rewrites the header
 * area in place, which requires a private, writable header.
 */
static
netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);
	int result = -1;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);

	if (skb_cow_head(skb, 0))
		goto drop;	/* jumps into the error arm below */

	if (i2400m->state == I2400M_SS_IDLE)
		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
	else
		result = i2400m_net_tx(i2400m, net_dev, skb);
	if (result < 0) {
drop:
		net_dev->stats.tx_dropped++;
	} else {
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += skb->len;
	}
	dev_kfree_skb(skb);
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return NETDEV_TX_OK;
}
395
396
397static
398int i2400m_change_mtu(struct net_device *net_dev, int new_mtu)
399{
400 int result;
401 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
402 struct device *dev = i2400m_dev(i2400m);
403
404 if (new_mtu >= I2400M_MAX_MTU) {
405 dev_err(dev, "Cannot change MTU to %d (max is %d)\n",
406 new_mtu, I2400M_MAX_MTU);
407 result = -EINVAL;
408 } else {
409 net_dev->mtu = new_mtu;
410 result = 0;
411 }
412 return result;
413}
414
415
/*
 * ndo_tx_timeout: the watchdog fired because the TX queue made no
 * progress for watchdog_timeo jiffies.
 *
 * We only account the event; no recovery is attempted here.
 * NOTE(review): the queue is deliberately stopped while a wake-from-
 * idle is in flight (i2400m_net_wake_tx()), which is presumably why
 * no reset is done on timeout — confirm before changing.
 */
static
void i2400m_tx_timeout(struct net_device *net_dev)
{
	net_dev->stats.tx_errors++;
}
428
429
430
431
432
433
434
435
436
/*
 * Write a fake ethernet header into @_eth_hdr (which must have room
 * for a struct ethhdr): destination = our own interface address,
 * source = the driver's fabricated peer address (i2400m->src_mac_addr),
 * protocol = @protocol (already in network byte order).
 *
 * Used on RX to make the IP-only WiMAX link look like ethernet to the
 * network stack.
 */
static
void i2400m_rx_fake_eth_header(struct net_device *net_dev,
			       void *_eth_hdr, __be16 protocol)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct ethhdr *eth_hdr = _eth_hdr;

	/* sizeof(h_dest)/sizeof(h_source) == ETH_ALEN */
	memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
	memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
	       sizeof(eth_hdr->h_source));
	eth_hdr->h_proto = protocol;
}
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
/*
 * i2400m_net_rx - deliver one RX payload to the network stack
 *
 * @i2400m: device descriptor
 * @skb_rx: the skb the payload lives in
 * @i: payload index within the RX message; when non-zero the original
 *     skb is reused (with an extra reference) instead of copying
 * @buf: start of the payload inside skb_rx's data
 * @buf_len: payload length
 *
 * For i != 0 the skb is re-pointed at the payload: data is pulled up
 * to @buf and the length trimmed to the end of the skb's data area
 * (NOTE(review): trim uses skb_end_pointer(), not buf_len — confirm
 * this matches the message layout).  For i == 0 a fresh skb is
 * allocated and the payload copied.  A fake ethernet header (IPv4) is
 * then fabricated in the headroom and the skb handed to netif_rx_ni().
 */
void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   unsigned i, const void *buf, int buf_len)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;

	d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
		  i2400m, buf, buf_len);
	if (i) {
		/* Reuse the RX skb; skb_get() so the stack owns its
		 * own reference when we pass it up. */
		skb = skb_get(skb_rx);
		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
		skb_pull(skb, buf - (void *) skb->data);
		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
	} else {
		/* First payload: copy into a fresh skb */
		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "NETRX: no memory to realloc skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_realloc;
		}
		memcpy(skb_put(skb, buf_len), buf, buf_len);
	}
	/* Fabricate an ethernet header just before the IP payload */
	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
				  skb->data - ETH_HLEN,
				  cpu_to_be16(ETH_P_IP));
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = i2400m->wimax_dev.net_dev;
	skb->protocol = htons(ETH_P_IP);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += buf_len;
	d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
		 buf_len);
	d_dump(4, dev, buf, buf_len);
	netif_rx_ni(skb);
error_skb_realloc:
	d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
		i2400m, buf, buf_len);
}
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
553 enum i2400m_cs cs)
554{
555 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
556 struct device *dev = i2400m_dev(i2400m);
557 int protocol;
558
559 d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
560 i2400m, skb, skb->len, cs);
561 switch(cs) {
562 case I2400M_CS_IPV4_0:
563 case I2400M_CS_IPV4:
564 protocol = ETH_P_IP;
565 i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
566 skb->data - ETH_HLEN,
567 cpu_to_be16(ETH_P_IP));
568 skb_set_mac_header(skb, -ETH_HLEN);
569 skb->dev = i2400m->wimax_dev.net_dev;
570 skb->protocol = htons(ETH_P_IP);
571 net_dev->stats.rx_packets++;
572 net_dev->stats.rx_bytes += skb->len;
573 break;
574 default:
575 dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
576 goto error;
577
578 }
579 d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
580 skb->len);
581 d_dump(4, dev, skb->data, skb->len);
582 netif_rx_ni(skb);
583error:
584 d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n",
585 i2400m, skb, skb->len, cs);
586}
587
/* Netdevice operations for the i2400m virtual ethernet interface */
static const struct net_device_ops i2400m_netdev_ops = {
	.ndo_open = i2400m_open,
	.ndo_stop = i2400m_stop,
	.ndo_start_xmit = i2400m_hard_start_xmit,
	.ndo_tx_timeout = i2400m_tx_timeout,
	.ndo_change_mtu = i2400m_change_mtu,
};
595
/*
 * ethtool get_drvinfo: report driver name, firmware file name (empty
 * string if none loaded yet) and the parent bus device name.
 */
static void i2400m_get_drvinfo(struct net_device *net_dev,
			       struct ethtool_drvinfo *info)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	/* fw_name may be NULL before firmware load; fall back to "" */
	strlcpy(info->fw_version, i2400m->fw_name ? : "",
		sizeof(info->fw_version));
	if (net_dev->dev.parent)
		strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
			sizeof(info->bus_info));
}
608
/* ethtool operations: drvinfo plus the generic link-state helper */
static const struct ethtool_ops i2400m_ethtool_ops = {
	.get_drvinfo = i2400m_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
613
614
615
616
617
618
/*
 * i2400m_netdev_setup - netdev setup callback for the i2400m interface
 *
 * Starts from ether_setup() and then strips what does not apply to an
 * IP-only point-to-point WiMAX link: no ARP, no broadcast, no
 * multicast, no VLAN.  Installs the driver's netdev/ethtool ops and
 * the TX watchdog/queue-length constants defined at the top of this
 * file.
 */
void i2400m_netdev_setup(struct net_device *net_dev)
{
	d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
	ether_setup(net_dev);
	net_dev->mtu = I2400M_MAX_MTU;
	net_dev->tx_queue_len = I2400M_TX_QLEN;
	net_dev->features =
		NETIF_F_VLAN_CHALLENGED
		| NETIF_F_HIGHDMA;
	/* Link is point-to-point IP: no ARP, no broadcast/multicast */
	net_dev->flags =
		IFF_NOARP
		& (~IFF_BROADCAST
		   & ~IFF_MULTICAST);
	net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
	net_dev->netdev_ops = &i2400m_netdev_ops;
	net_dev->ethtool_ops = &i2400m_ethtool_ops;
	d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
}
EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
638
639