1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75#include <linux/if_arp.h>
76#include <linux/slab.h>
77#include <linux/netdevice.h>
78#include <linux/ethtool.h>
79#include <linux/export.h>
80#include "i2400m.h"
81
82
83#define D_SUBMODULE netdev
84#include "debug-levels.h"
85
enum {
	/*
	 * TX watchdog timeout, in jiffies; installed into
	 * net_dev->watchdog_timeo by i2400m_netdev_setup().  The
	 * wake-from-idle path waits (watchdog_timeo - HZ/2) for the
	 * device to leave IDLE, so this must exceed that wait.
	 */
	I2400M_TX_TIMEOUT = 21 * HZ,
	/*
	 * Default TX queue length; installed into
	 * net_dev->tx_queue_len by i2400m_netdev_setup().
	 */
	I2400M_TX_QLEN = 20,
};
98
99
100static
101int i2400m_open(struct net_device *net_dev)
102{
103 int result;
104 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
105 struct device *dev = i2400m_dev(i2400m);
106
107 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
108
109 mutex_lock(&i2400m->init_mutex);
110 if (i2400m->updown)
111 result = 0;
112 else
113 result = -EBUSY;
114 mutex_unlock(&i2400m->init_mutex);
115 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
116 net_dev, i2400m, result);
117 return result;
118}
119
120
/*
 * Bring the network interface down.
 *
 * Stops any pending wake-and-TX work and frees its stashed skb via
 * i2400m_net_wake_stop().  Always returns 0.
 */
static
int i2400m_stop(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	i2400m_net_wake_stop(i2400m);
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
	return 0;
}
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
/*
 * Deferred work: wake the device from IDLE and transmit the skb that
 * i2400m_net_wake_tx() stashed in i2400m->wake_tx_skb.
 *
 * Takes ownership of the stashed skb (detached under tx_lock) and of
 * the device reference taken by i2400m_net_wake_tx(); both are
 * released on every exit path.  If the device cannot be coaxed out of
 * IDLE, it is reset over the bus.
 */
void i2400m_wake_tx_work(struct work_struct *ws)
{
	int result;
	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;
	unsigned long flags;

	/* Detach the stashed skb under the lock so a concurrent
	 * i2400m_net_wake_stop()/wake_tx can't also claim it. */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
	result = -EINVAL;
	if (skb == NULL) {
		dev_err(dev, "WAKE&TX: skb disappeared!\n");
		goto out_put;
	}
	/* If the carrier dropped while this work sat queued, there is
	 * nowhere to send the frame; just free it. */
	if (unlikely(!netif_carrier_ok(net_dev)))
		goto out_kfree;
	result = i2400m_cmd_exit_idle(i2400m);
	if (result == -EILSEQ)
		result = 0;	/* -EILSEQ is treated as non-fatal here */
	if (result < 0) {
		dev_err(dev, "WAKE&TX: device didn't get out of idle: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	/* Wait (bounded well below the TX watchdog timeout) for the
	 * state reports to move us out of IDLE. */
	result = wait_event_timeout(i2400m->state_wq,
				    i2400m->state != I2400M_SS_IDLE,
				    net_dev->watchdog_timeo - HZ/2);
	if (result == 0)
		result = -ETIMEDOUT;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	msleep(20);	/* settle delay -- NOTE(review): why 20 ms? confirm */
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
error:
	/* Queue was stopped when the skb was stashed; restart it. */
	netif_wake_queue(net_dev);
out_kfree:
	kfree_skb(skb);
out_put:
	i2400m_put(i2400m);	/* ref taken by i2400m_net_wake_tx() */
	d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
		ws, i2400m, skb, result);
}
209
210
211
212
213
214
215
216
217
218
/*
 * Replace the (fake) ethernet header with the i2400m data payload
 * header: pull ETH_HLEN bytes off the front and push a zeroed
 * struct i2400m_pl_data_hdr in its place.
 */
static
void i2400m_tx_prep_header(struct sk_buff *skb)
{
	struct i2400m_pl_data_hdr *pl_hdr;
	skb_pull(skb, ETH_HLEN);
	pl_hdr = skb_push(skb, sizeof(*pl_hdr));
	pl_hdr->reserved = 0;
}
227
228
229
230
231
232
233
234
235
/*
 * Cancel any pending wake-and-TX work and release its resources.
 *
 * Synchronously cancels wake_tx_ws first, then (under tx_lock)
 * detaches any stashed skb; whoever detaches it owns the cleanup, so
 * if one was pending we drop the device reference and the skb
 * reference taken by i2400m_net_wake_tx().
 */
void i2400m_net_wake_stop(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *wake_tx_skb;
	unsigned long flags;

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
	/* Cancel before detaching, so the work item can no longer run
	 * and consume the skb out from under us. */
	cancel_work_sync(&i2400m->wake_tx_ws);

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	wake_tx_skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	if (wake_tx_skb) {
		i2400m_put(i2400m);	/* ref taken when the skb was stashed */
		kfree_skb(wake_tx_skb);
	}

	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
261
262
263
264
265
266
267
268
269
270
271
272
273
/*
 * TX path used while the device is in IDLE state: stash the skb, stop
 * the TX queue and schedule i2400m_wake_tx_work() to wake the device
 * and send it.
 *
 * Only one skb can be pending at a time: if wake_tx_skb is already
 * set, the new skb is rejected with -EBUSY (the caller drops it).
 *
 * On success this takes an extra skb reference (skb_get) and a device
 * reference (i2400m_get) that the work item releases.
 */
static
int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
		       struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
	if (net_ratelimit()) {
		d_printf(3, dev, "WAKE&NETTX: "
			 "skb %p sending %d bytes to radio\n",
			 skb, skb->len);
		d_dump(4, dev, skb->data, skb->len);
	}
	/* result stays 0 (-> -EBUSY below) if a wake TX is already pending */
	result = 0;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (!i2400m->wake_tx_skb) {
		netif_stop_queue(net_dev);
		i2400m_get(i2400m);	/* put by i2400m_wake_tx_work() */
		i2400m->wake_tx_skb = skb_get(skb);	/* owned by the work item */
		i2400m_tx_prep_header(skb);
		result = schedule_work(&i2400m->wake_tx_ws);
		WARN_ON(result == 0);	/* work can't already be queued here */
	}
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (result == 0) {
		/* A previous wake TX is still in flight; drop this one. */
		if (net_ratelimit())
			d_printf(1, dev, "NETTX: device exiting idle, "
				 "dropping skb %p, queue running %d\n",
				 skb, netif_queue_stopped(net_dev));
		result = -EBUSY;
	}
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return result;
}
317
318
319
320
321
322
323
324
325
326
327static
328int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
329 struct sk_buff *skb)
330{
331 int result;
332 struct device *dev = i2400m_dev(i2400m);
333
334 d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
335 i2400m, net_dev, skb);
336
337 netif_trans_update(net_dev);
338 i2400m_tx_prep_header(skb);
339 d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
340 skb, skb->len);
341 d_dump(4, dev, skb->data, skb->len);
342 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
343 d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
344 i2400m, net_dev, skb, result);
345 return result;
346}
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
/*
 * ndo_start_xmit handler: route the skb to the wake-and-TX path when
 * the device is in IDLE state, otherwise to the normal TX path.
 *
 * Always consumes the skb and returns NETDEV_TX_OK; a failure only
 * bumps tx_dropped.  Note the `drop:` label deliberately sits inside
 * the (result < 0) branch so a skb_cow_head() failure shares its
 * accounting.
 */
static
netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);
	int result = -1;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);

	/* The header is rewritten in place; make sure it is writable. */
	if (skb_cow_head(skb, 0))
		goto drop;

	if (i2400m->state == I2400M_SS_IDLE)
		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
	else
		result = i2400m_net_tx(i2400m, net_dev, skb);
	if (result < 0) {
drop:
		net_dev->stats.tx_dropped++;
	} else {
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += skb->len;
	}
	dev_kfree_skb(skb);
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return NETDEV_TX_OK;
}
395
396
/*
 * ndo_tx_timeout handler: only account the error.
 *
 * NOTE(review): no recovery is attempted here; presumably the
 * driver's reset machinery elsewhere handles a truly stuck device --
 * confirm.
 */
static
void i2400m_tx_timeout(struct net_device *net_dev)
{
	net_dev->stats.tx_errors++;
}
409
410
411
412
413
414
415
416
417
418static
419void i2400m_rx_fake_eth_header(struct net_device *net_dev,
420 void *_eth_hdr, __be16 protocol)
421{
422 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
423 struct ethhdr *eth_hdr = _eth_hdr;
424
425 memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
426 memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
427 sizeof(eth_hdr->h_source));
428 eth_hdr->h_proto = protocol;
429}
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
/*
 * Deliver a received payload to the network stack.
 *
 * @skb_rx:  skb containing the whole receive buffer.
 * @i:       index of this payload within the receive buffer.
 * @buf:     pointer to the payload data (inside skb_rx's data area).
 * @buf_len: payload length.
 *
 * For the first payload (i == 0) a fresh skb is allocated and the
 * data copied into it; for later payloads skb_rx itself is reference
 * counted (skb_get) and pulled/trimmed down to just this payload.
 * NOTE(review): reusing skb_rx mutates it, which suggests only one
 * later payload can take this path -- confirm against the caller.
 *
 * A fake ethernet header (ETH_P_IP) is synthesized in front of the
 * data before handing the skb to netif_rx_ni().
 */
void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   unsigned i, const void *buf, int buf_len)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;

	d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
		  i2400m, buf, buf_len);
	if (i) {
		/* Reuse the receive skb: take a reference and cut it
		 * down to this payload only. */
		skb = skb_get(skb_rx);
		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
		skb_pull(skb, buf - (void *) skb->data);
		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
	} else {
		/* First payload: copy into a freshly allocated skb. */
		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "NETRX: no memory to realloc skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_realloc;
		}
		skb_put_data(skb, buf, buf_len);
	}
	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
				  skb->data - ETH_HLEN,
				  cpu_to_be16(ETH_P_IP));
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = i2400m->wimax_dev.net_dev;
	skb->protocol = htons(ETH_P_IP);
	/* Stats are counted before netif_rx_ni(); its return value is
	 * not checked. */
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += buf_len;
	d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
		 buf_len);
	d_dump(4, dev, buf, buf_len);
	netif_rx_ni(skb);
error_skb_realloc:
	d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
		i2400m, buf, buf_len);
}
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
534 enum i2400m_cs cs)
535{
536 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
537 struct device *dev = i2400m_dev(i2400m);
538 int protocol;
539
540 d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
541 i2400m, skb, skb->len, cs);
542 switch(cs) {
543 case I2400M_CS_IPV4_0:
544 case I2400M_CS_IPV4:
545 protocol = ETH_P_IP;
546 i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
547 skb->data - ETH_HLEN,
548 cpu_to_be16(ETH_P_IP));
549 skb_set_mac_header(skb, -ETH_HLEN);
550 skb->dev = i2400m->wimax_dev.net_dev;
551 skb->protocol = htons(ETH_P_IP);
552 net_dev->stats.rx_packets++;
553 net_dev->stats.rx_bytes += skb->len;
554 break;
555 default:
556 dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
557 goto error;
558
559 }
560 d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
561 skb->len);
562 d_dump(4, dev, skb->data, skb->len);
563 netif_rx_ni(skb);
564error:
565 d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n",
566 i2400m, skb, skb->len, cs);
567}
568
/* Net device operations; installed by i2400m_netdev_setup(). */
static const struct net_device_ops i2400m_netdev_ops = {
	.ndo_open = i2400m_open,
	.ndo_stop = i2400m_stop,
	.ndo_start_xmit = i2400m_hard_start_xmit,
	.ndo_tx_timeout = i2400m_tx_timeout,
};
575
576static void i2400m_get_drvinfo(struct net_device *net_dev,
577 struct ethtool_drvinfo *info)
578{
579 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
580
581 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
582 strlcpy(info->fw_version, i2400m->fw_name ? : "",
583 sizeof(info->fw_version));
584 if (net_dev->dev.parent)
585 strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
586 sizeof(info->bus_info));
587}
588
/* ethtool operations; installed by i2400m_netdev_setup(). */
static const struct ethtool_ops i2400m_ethtool_ops = {
	.get_drvinfo = i2400m_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
593
594
595
596
597
598
/*
 * Initialize a net_device with i2400m-specific parameters.
 *
 * Starts from a standard ethernet setup, then caps the MTU at
 * I2400M_MAX_MTU, marks the device VLAN-challenged and high-DMA
 * capable, disables ARP and clears the broadcast/multicast flags that
 * ether_setup() sets, and installs the netdev/ethtool ops and the TX
 * watchdog timeout.
 */
void i2400m_netdev_setup(struct net_device *net_dev)
{
	d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
	ether_setup(net_dev);
	net_dev->mtu = I2400M_MAX_MTU;
	net_dev->min_mtu = 0;
	net_dev->max_mtu = I2400M_MAX_MTU;
	net_dev->tx_queue_len = I2400M_TX_QLEN;
	net_dev->features =
		NETIF_F_VLAN_CHALLENGED
		| NETIF_F_HIGHDMA;
	/* Bitwise this reduces to IFF_NOARP alone: no ARP, and the
	 * BROADCAST/MULTICAST flags from ether_setup() masked off. */
	net_dev->flags =
		IFF_NOARP
		& (~IFF_BROADCAST
		   & ~IFF_MULTICAST);
	net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
	net_dev->netdev_ops = &i2400m_netdev_ops;
	net_dev->ethtool_ops = &i2400m_ethtool_ops;
	d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
}
EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
620
621