1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75#include <linux/if_arp.h>
76#include <linux/slab.h>
77#include <linux/netdevice.h>
78#include <linux/ethtool.h>
79#include <linux/export.h>
80#include "i2400m.h"
81
82
83#define D_SUBMODULE netdev
84#include "debug-levels.h"
85
enum {
	/*
	 * TX watchdog timeout before i2400m_tx_timeout() is invoked.
	 * NOTE(review): the specific choice of 21s is not documented
	 * here; i2400m_wake_tx_work() waits watchdog_timeo - HZ/2, so
	 * it must stay comfortably larger than the wake-up latency.
	 */
	I2400M_TX_TIMEOUT = 21 * HZ,
	/* TX queue length programmed into net_dev->tx_queue_len */
	I2400M_TX_QLEN = 20,
};
98
99
100static
101int i2400m_open(struct net_device *net_dev)
102{
103 int result;
104 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
105 struct device *dev = i2400m_dev(i2400m);
106
107 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
108
109 mutex_lock(&i2400m->init_mutex);
110 if (i2400m->updown)
111 result = 0;
112 else
113 result = -EBUSY;
114 mutex_unlock(&i2400m->init_mutex);
115 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
116 net_dev, i2400m, result);
117 return result;
118}
119
120
/*
 * ndo_stop: tear down the wake-on-TX machinery; always returns 0.
 */
static
int i2400m_stop(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	/* Cancel any pending wake&tx work and free its cached skb */
	i2400m_net_wake_stop(i2400m);
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
	return 0;
}
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
/*
 * Deferred work: wake up an IDLE device and transmit the skb that
 * i2400m_net_wake_tx() cached in i2400m->wake_tx_skb.
 *
 * On any command failure the device is bus-reset; on all paths past
 * the exit-idle attempt the TX queue is restarted, the skb reference
 * taken by i2400m_net_wake_tx() is dropped, and so is the device
 * reference taken when this work was scheduled.
 */
void i2400m_wake_tx_work(struct work_struct *ws)
{
	int result;
	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;
	unsigned long flags;

	/* Detach the cached skb under the TX lock */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
	result = -EINVAL;
	if (skb == NULL) {
		/* Presumably i2400m_net_wake_stop() stole it first */
		dev_err(dev, "WAKE&TX: skb disappeared!\n");
		goto out_put;
	}

	/* Carrier lost while the work was queued: silently drop */
	if (unlikely(!netif_carrier_ok(net_dev)))
		goto out_kfree;
	result = i2400m_cmd_exit_idle(i2400m);
	if (result == -EILSEQ)	/* NOTE(review): treated as "already out
				 * of idle" and thus success — confirm */
		result = 0;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: device didn't get out of idle: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	/* Wait (bounded below the watchdog timeout) until the device
	 * state machine actually leaves IDLE */
	result = wait_event_timeout(i2400m->state_wq,
				    i2400m->state != I2400M_SS_IDLE,
				    net_dev->watchdog_timeo - HZ/2);
	if (result == 0)
		result = -ETIMEDOUT;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	msleep(20);	/* NOTE(review): undocumented settle delay; the
			 * reason is not visible in this file */
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
error:
	netif_wake_queue(net_dev);	/* also reached on TX success */
out_kfree:
	kfree_skb(skb);		/* drops the skb_get() ref from wake_tx */
out_put:
	i2400m_put(i2400m);	/* drops the i2400m_get() from wake_tx */
	d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
		ws, i2400m, skb, result);
}
209
210
211
212
213
214
215
216
217
218
219static
220void i2400m_tx_prep_header(struct sk_buff *skb)
221{
222 struct i2400m_pl_data_hdr *pl_hdr;
223 skb_pull(skb, ETH_HLEN);
224 pl_hdr = (struct i2400m_pl_data_hdr *) skb_push(skb, sizeof(*pl_hdr));
225 pl_hdr->reserved = 0;
226}
227
228
229
230
231
232
233
234
235
/*
 * Stop the wake-on-TX machinery: cancel pending work and release the
 * cached skb (and its associated device reference), if any.
 */
void i2400m_net_wake_stop(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *wake_tx_skb;
	unsigned long flags;

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);

	/* Wait out any in-flight i2400m_wake_tx_work() before stealing
	 * its skb. NOTE(review): nothing here stops i2400m_net_wake_tx()
	 * from re-queueing after the cancel; presumably callers have
	 * quiesced the TX path first — confirm. */
	cancel_work_sync(&i2400m->wake_tx_ws);

	/* Steal the cached skb under the TX lock */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	wake_tx_skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	if (wake_tx_skb) {
		/* Balance the i2400m_get()/skb_get() pair taken by
		 * i2400m_net_wake_tx() when the work was scheduled */
		i2400m_put(i2400m);
		kfree_skb(wake_tx_skb);
	}

	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
261
262
263
264
265
266
267
268
269
270
271
272
273
/*
 * TX an skb while the device sits in IDLE: cache it, stop the queue
 * and schedule i2400m_wake_tx_work() to wake the device and send it.
 *
 * Only one wake&tx can be in flight: if wake_tx_skb is still set (or
 * the work is already pending), returns -EBUSY and the caller drops
 * the skb. Returns the schedule_work() result (nonzero) on success.
 */
static
int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
		       struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
	if (net_ratelimit()) {
		d_printf(3, dev, "WAKE&NETTX: "
			 "skb %p sending %d bytes to radio\n",
			 skb, skb->len);
		d_dump(4, dev, skb->data, skb->len);
	}

	result = 0;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (!i2400m->wake_tx_skb) {
		netif_stop_queue(net_dev);
		/* Device ref held until the work function (or
		 * i2400m_net_wake_stop()) releases it */
		i2400m_get(i2400m);
		/* Extra skb ref: the caller frees its own copy */
		i2400m->wake_tx_skb = skb_get(skb);
		i2400m_tx_prep_header(skb);
		result = schedule_work(&i2400m->wake_tx_ws);
		/* With wake_tx_skb NULL the work should never have
		 * been pending, so schedule_work() must return true */
		WARN_ON(result == 0);
	}
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (result == 0) {
		/* A previous wake&tx is still pending: tell the caller
		 * to account this skb as dropped */
		if (net_ratelimit())
			d_printf(1, dev, "NETTX: device exiting idle, "
				 "dropping skb %p, queue running %d\n",
				 skb, netif_queue_stopped(net_dev));
		result = -EBUSY;
	}
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return result;
}
317
318
319
320
321
322
323
324
325
326
/*
 * TX an skb to a non-idle device: swap the fake ethernet header for
 * the i2400m payload header and hand the data to i2400m_tx().
 *
 * Returns the i2400m_tx() result (0 or negative errno).
 */
static
int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
		  struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
		  i2400m, net_dev, skb);

	net_dev->trans_start = jiffies;	/* feed the TX watchdog */
	i2400m_tx_prep_header(skb);
	d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
		 skb, skb->len);
	d_dump(4, dev, skb->data, skb->len);
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
	d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
		i2400m, net_dev, skb, result);
	return result;
}
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367static
368netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
369 struct net_device *net_dev)
370{
371 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
372 struct device *dev = i2400m_dev(i2400m);
373 int result = -1;
374
375 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
376
377 if (skb_header_cloned(skb) &&
378 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
379 goto drop;
380
381 if (i2400m->state == I2400M_SS_IDLE)
382 result = i2400m_net_wake_tx(i2400m, net_dev, skb);
383 else
384 result = i2400m_net_tx(i2400m, net_dev, skb);
385 if (result < 0) {
386drop:
387 net_dev->stats.tx_dropped++;
388 } else {
389 net_dev->stats.tx_packets++;
390 net_dev->stats.tx_bytes += skb->len;
391 }
392 dev_kfree_skb(skb);
393 d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
394 return NETDEV_TX_OK;
395}
396
397
398static
399int i2400m_change_mtu(struct net_device *net_dev, int new_mtu)
400{
401 int result;
402 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
403 struct device *dev = i2400m_dev(i2400m);
404
405 if (new_mtu >= I2400M_MAX_MTU) {
406 dev_err(dev, "Cannot change MTU to %d (max is %d)\n",
407 new_mtu, I2400M_MAX_MTU);
408 result = -EINVAL;
409 } else {
410 net_dev->mtu = new_mtu;
411 result = 0;
412 }
413 return result;
414}
415
416
/*
 * ndo_tx_timeout: the TX watchdog (I2400M_TX_TIMEOUT) fired.
 *
 * Nothing is reset or re-queued here — only the error counter is
 * bumped. NOTE(review): any actual recovery must happen elsewhere;
 * confirm that doing nothing here is intentional.
 */
static
void i2400m_tx_timeout(struct net_device *net_dev)
{
	net_dev->stats.tx_errors++;
}
429
430
431
432
433
434
435
436
437
438static
439void i2400m_rx_fake_eth_header(struct net_device *net_dev,
440 void *_eth_hdr, __be16 protocol)
441{
442 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
443 struct ethhdr *eth_hdr = _eth_hdr;
444
445 memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
446 memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
447 sizeof(eth_hdr->h_source));
448 eth_hdr->h_proto = protocol;
449}
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
/*
 * Deliver one received payload up the network stack as an IP packet.
 *
 * @i: payload index inside the RX message. For i != 0 the original
 *     RX skb is referenced and reused, pulled/trimmed down to the
 *     payload; for payload 0 a fresh skb is allocated and the data
 *     copied (NOTE(review): presumably because that skb's head is
 *     still in use elsewhere — not visible in this file).
 *
 * A fake ethernet header (ETH_P_IP) is written into the ETH_HLEN
 * bytes in front of the payload before netif_rx_ni().
 */
void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   unsigned i, const void *buf, int buf_len)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;

	d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
		  i2400m, buf, buf_len);
	if (i) {
		skb = skb_get(skb_rx);
		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
		skb_pull(skb, buf - (void *) skb->data);
		/* NOTE(review): this trims to the skb's end pointer,
		 * not to buf_len — confirm the payload really extends
		 * to the end of the buffer */
		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
	} else {
		/* GFP_KERNEL: this path runs in process context */
		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "NETRX: no memory to realloc skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_realloc;
		}
		memcpy(skb_put(skb, buf_len), buf, buf_len);
	}
	/* Write the fake header into the headroom just before data */
	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
				  skb->data - ETH_HLEN,
				  cpu_to_be16(ETH_P_IP));
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = i2400m->wimax_dev.net_dev;
	skb->protocol = htons(ETH_P_IP);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += buf_len;
	d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
		 buf_len);
	d_dump(4, dev, buf, buf_len);
	netif_rx_ni(skb);
error_skb_realloc:
	d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
		i2400m, buf, buf_len);
}
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
/*
 * Deliver an extended-RX skb up the network stack.
 *
 * @cs: payload type; only the IPv4 variants are handled — a fake
 *      ETH_P_IP ethernet header is written into the headroom and the
 *      skb handed to netif_rx_ni(). Unknown types are logged and the
 *      skb is NOT freed here (NOTE(review): confirm the caller owns
 *      and frees it in that case).
 */
void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
		    enum i2400m_cs cs)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	int protocol;

	d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
		  i2400m, skb, skb->len, cs);
	switch(cs) {
	case I2400M_CS_IPV4_0:
	case I2400M_CS_IPV4:
		protocol = ETH_P_IP;
		/* Fake header goes into the headroom just before data */
		i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
					  skb->data - ETH_HLEN,
					  cpu_to_be16(ETH_P_IP));
		skb_set_mac_header(skb, -ETH_HLEN);
		skb->dev = i2400m->wimax_dev.net_dev;
		skb->protocol = htons(ETH_P_IP);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += skb->len;
		break;
	default:
		dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
		goto error;

	}
	d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
		 skb->len);
	d_dump(4, dev, skb->data, skb->len);
	netif_rx_ni(skb);
error:
	d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n",
		i2400m, skb, skb->len, cs);
}
588
/* net_device callbacks installed by i2400m_netdev_setup() */
static const struct net_device_ops i2400m_netdev_ops = {
	.ndo_open = i2400m_open,
	.ndo_stop = i2400m_stop,
	.ndo_start_xmit = i2400m_hard_start_xmit,
	.ndo_tx_timeout = i2400m_tx_timeout,
	.ndo_change_mtu = i2400m_change_mtu,
};
596
/*
 * ethtool get_drvinfo: report module name, firmware name and bus id.
 */
static void i2400m_get_drvinfo(struct net_device *net_dev,
			       struct ethtool_drvinfo *info)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	/* fw_name may be NULL (the ?: fallback reports "" instead) */
	strlcpy(info->fw_version, i2400m->fw_name ? : "",
		sizeof(info->fw_version));
	if (net_dev->dev.parent)
		strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
			sizeof(info->bus_info));
}
609
/* ethtool callbacks installed by i2400m_netdev_setup() */
static const struct ethtool_ops i2400m_ethtool_ops = {
	.get_drvinfo = i2400m_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
614
615
616
617
618
619
620void i2400m_netdev_setup(struct net_device *net_dev)
621{
622 d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
623 ether_setup(net_dev);
624 net_dev->mtu = I2400M_MAX_MTU;
625 net_dev->tx_queue_len = I2400M_TX_QLEN;
626 net_dev->features =
627 NETIF_F_VLAN_CHALLENGED
628 | NETIF_F_HIGHDMA;
629 net_dev->flags =
630 IFF_NOARP
631 & (~IFF_BROADCAST
632 & ~IFF_MULTICAST);
633 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
634 net_dev->netdev_ops = &i2400m_netdev_ops;
635 net_dev->ethtool_ops = &i2400m_ethtool_ops;
636 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
637}
638EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
639
640