/*
 * Intel Wireless WiMAX Connection 2400m
 * Glue with the networking stack
 *
 * NOTE(review): the original file header (copyright/license block)
 * was garbled into bare line-number residue in this copy; restore
 * the full upstream header text before shipping.
 */
60#include <linux/if_arp.h>
61#include <linux/slab.h>
62#include <linux/netdevice.h>
63#include <linux/ethtool.h>
64#include <linux/export.h>
65#include "i2400m.h"
66
67
68#define D_SUBMODULE netdev
69#include "debug-levels.h"
70
enum {
	/*
	 * TX watchdog interval for the netdev queue.
	 * NOTE(review): 21s is the value the driver has always used;
	 * the derivation (device wake latency + slack, presumably) is
	 * not visible in this file — confirm against the spec.
	 */
	I2400M_TX_TIMEOUT = 21 * HZ,
	/*
	 * Keep the software TX queue short; buffering is assumed to
	 * happen in the device/TX FIFO — TODO confirm.
	 */
	I2400M_TX_QLEN = 20,
};
83
84
85static
86int i2400m_open(struct net_device *net_dev)
87{
88 int result;
89 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
90 struct device *dev = i2400m_dev(i2400m);
91
92 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
93
94 mutex_lock(&i2400m->init_mutex);
95 if (i2400m->updown)
96 result = 0;
97 else
98 result = -EBUSY;
99 mutex_unlock(&i2400m->init_mutex);
100 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
101 net_dev, i2400m, result);
102 return result;
103}
104
105
/*
 * Bring the network interface down.
 *
 * The only deferred TX machinery in this file is the wake-and-TX
 * work; abort it so nothing touches the device after stop.
 *
 * Returns: always 0.
 */
static
int i2400m_stop(struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
	i2400m_net_wake_stop(i2400m);	/* reap any pending wake_tx skb/refs */
	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
	return 0;
}
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
/*
 * Deferred worker: wake the device from IDLE and send the packet that
 * triggered the wakeup.
 *
 * i2400m_net_wake_tx() stashed the skb in i2400m->wake_tx_skb and took
 * a device reference plus an skb reference; this function consumes
 * both. On every exit path the skb is freed (if present) and the
 * device reference is dropped.
 */
void i2400m_wake_tx_work(struct work_struct *ws)
{
	int result;
	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;
	unsigned long flags;

	/* Atomically claim the stashed skb so a concurrent
	 * i2400m_net_wake_stop() cannot free it under us. */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
	result = -EINVAL;
	if (skb == NULL) {
		dev_err(dev, "WAKE&TX: skb disappeared!\n");
		goto out_put;
	}
	/* If the carrier dropped while this work was queued, just toss
	 * the packet; there is no link to send it over. */
	if (unlikely(!netif_carrier_ok(net_dev)))
		goto out_kfree;
	result = i2400m_cmd_exit_idle(i2400m);
	if (result == -EILSEQ)
		/* -EILSEQ: device was not in IDLE after all — fine */
		result = 0;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: device didn't get out of idle: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	/* Wait (bounded by the watchdog interval minus slack) for the
	 * state machine to report that we left IDLE. */
	result = wait_event_timeout(i2400m->state_wq,
				    i2400m->state != I2400M_SS_IDLE,
				    net_dev->watchdog_timeo - HZ/2);
	if (result == 0)
		result = -ETIMEDOUT;
	if (result < 0) {
		dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
			"%d - resetting\n", result);
		i2400m_reset(i2400m, I2400M_RT_BUS);
		goto error;
	}
	/* NOTE(review): 20ms settle delay after IDLE exit — the reason
	 * is not visible here; confirm against device docs. */
	msleep(20);
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
error:
	netif_wake_queue(net_dev);	/* queue was stopped by net_wake_tx() */
out_kfree:
	kfree_skb(skb);
out_put:
	i2400m_put(i2400m);	/* balances i2400m_get() in net_wake_tx() */
	d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
		ws, i2400m, skb, result);
}
194
195
196
197
198
199
200
201
202
203
204static
205void i2400m_tx_prep_header(struct sk_buff *skb)
206{
207 struct i2400m_pl_data_hdr *pl_hdr;
208 skb_pull(skb, ETH_HLEN);
209 pl_hdr = skb_push(skb, sizeof(*pl_hdr));
210 pl_hdr->reserved = 0;
211}
212
213
214
215
216
217
218
219
220
/*
 * i2400m_net_wake_stop - abort a pending wake-and-TX operation
 *
 * Called on interface stop/teardown. First cancels (and waits out)
 * the wake_tx work, then reaps any skb it left stashed. If an skb was
 * still pending, the device reference and skb reference taken by
 * i2400m_net_wake_tx() are released here instead of by the worker.
 *
 * Ordering matters: cancel_work_sync() must complete before the skb
 * slot is cleared, so the worker cannot consume it concurrently.
 */
void i2400m_net_wake_stop(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *wake_tx_skb;
	unsigned long flags;

	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
	/* Stop the worker first; after this it can no longer run and
	 * touch wake_tx_skb. */
	cancel_work_sync(&i2400m->wake_tx_ws);

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	wake_tx_skb = i2400m->wake_tx_skb;
	i2400m->wake_tx_skb = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	if (wake_tx_skb) {
		i2400m_put(i2400m);	/* ref taken in i2400m_net_wake_tx() */
		kfree_skb(wake_tx_skb);
	}

	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
246
247
248
249
250
251
252
253
254
255
256
257
258
/*
 * Queue a packet for TX while the device is in IDLE state.
 *
 * The device cannot transmit while idle, so we stash the skb (a
 * single slot: if wake_tx_skb is already occupied, a wakeup is in
 * flight and this packet is dropped), stop the netdev queue and
 * schedule i2400m_wake_tx_work() to wake the device and send it.
 * One device reference and one skb reference are taken; both are
 * released by the worker (or by i2400m_net_wake_stop() on teardown).
 *
 * Returns: 0 if the packet was queued for the wakeup worker,
 *	-EBUSY if a previous wakeup TX is still pending.
 */
static
int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
		       struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
	if (net_ratelimit()) {
		d_printf(3, dev, "WAKE&NETTX: "
			 "skb %p sending %d bytes to radio\n",
			 skb, skb->len);
		d_dump(4, dev, skb->data, skb->len);
	}
	/* tx_lock protects the single wake_tx_skb slot against the
	 * worker and against teardown. */
	result = 0;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (!i2400m->wake_tx_skb) {
		netif_stop_queue(net_dev);
		i2400m_get(i2400m);	/* dropped by the worker */
		i2400m->wake_tx_skb = skb_get(skb);	/* freed by the worker */
		i2400m_tx_prep_header(skb);
		result = schedule_work(&i2400m->wake_tx_ws);
		/* slot was empty, so the work cannot already be queued */
		WARN_ON(result == 0);
	}
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (result == 0) {
		/* A wakeup is already in progress — the device is on
		 * its way out of IDLE; drop this packet. */
		if (net_ratelimit())
			d_printf(1, dev, "NETTX: device exiting idle, "
				 "dropping skb %p, queue running %d\n",
				 skb, netif_queue_stopped(net_dev));
		result = -EBUSY;
	}
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return result;
}
302
303
304
305
306
307
308
309
310
311
312static
313int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
314 struct sk_buff *skb)
315{
316 int result;
317 struct device *dev = i2400m_dev(i2400m);
318
319 d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
320 i2400m, net_dev, skb);
321
322 netif_trans_update(net_dev);
323 i2400m_tx_prep_header(skb);
324 d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
325 skb, skb->len);
326 d_dump(4, dev, skb->data, skb->len);
327 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
328 d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
329 i2400m, net_dev, skb, result);
330 return result;
331}
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
/*
 * Transmit a packet handed down by the network stack.
 *
 * If the device is in IDLE state, route the packet through the wakeup
 * path (i2400m_net_wake_tx()); otherwise send it directly with
 * i2400m_net_tx(). The skb is always consumed here and NETDEV_TX_OK
 * is always returned; success/drop is only reflected in the stats.
 */
static
netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);
	int result = -1;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);

	/* The header will be rewritten in place; make it writable */
	if (skb_cow_head(skb, 0))
		goto drop;

	if (i2400m->state == I2400M_SS_IDLE)
		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
	else
		result = i2400m_net_tx(i2400m, net_dev, skb);
	if (result < 0) {
drop:
		net_dev->stats.tx_dropped++;
	} else {
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += skb->len;
	}
	dev_kfree_skb(skb);	/* we own the skb on every path here */
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return NETDEV_TX_OK;
}
380
381
/*
 * TX watchdog: the netdev queue has been stopped longer than
 * watchdog_timeo.
 *
 * Only the error counter is bumped here. NOTE(review): the wakeup TX
 * path already resets the device on failure, which presumably makes
 * further recovery here redundant — confirm against the driver's
 * design notes.
 */
static
void i2400m_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
	net_dev->stats.tx_errors++;
}
394
395
396
397
398
399
400
401
402
403static
404void i2400m_rx_fake_eth_header(struct net_device *net_dev,
405 void *_eth_hdr, __be16 protocol)
406{
407 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
408 struct ethhdr *eth_hdr = _eth_hdr;
409
410 memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
411 memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
412 sizeof(eth_hdr->h_source));
413 eth_hdr->h_proto = protocol;
414}
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
/*
 * i2400m_net_rx - pass a received data payload up the network stack
 *
 * @i2400m: device instance
 * @skb_rx: skb holding the whole RX message from the device
 * @i: index of this payload within the RX message
 * @buf: pointer to the payload data (inside @skb_rx)
 * @buf_len: payload length
 *
 * The device delivers bare IP packets; a fake ethernet header is
 * written into the headroom before the skb goes to netif_rx_ni().
 *
 * For payloads with @i != 0 a copy is avoided: a reference is taken
 * on @skb_rx and it is pulled/trimmed so skb->data covers exactly
 * this payload. For @i == 0 the data is copied into a freshly
 * allocated skb instead. NOTE(review): the reason the first payload
 * cannot reuse the message skb is not visible here — confirm in the
 * RX path.
 */
void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   unsigned i, const void *buf, int buf_len)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb;

	d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
		  i2400m, buf, buf_len);
	if (i) {
		/* Window the shared message skb onto this payload */
		skb = skb_get(skb_rx);
		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
		skb_pull(skb, buf - (void *) skb->data);
		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
	} else {
		/* Copy path for the first payload */
		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "NETRX: no memory to realloc skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_realloc;
		}
		skb_put_data(skb, buf, buf_len);
	}
	/* Fabricate the ethernet header in the headroom */
	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
				  skb->data - ETH_HLEN,
				  cpu_to_be16(ETH_P_IP));
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = i2400m->wimax_dev.net_dev;
	skb->protocol = htons(ETH_P_IP);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += buf_len;
	d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
		 buf_len);
	d_dump(4, dev, buf, buf_len);
	netif_rx_ni(skb);
error_skb_realloc:
	d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
		i2400m, buf, buf_len);
}
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
/*
 * i2400m_net_erx - deliver an extended-RX IP packet to the stack
 *
 * @i2400m: device instance
 * @skb: skb holding the bare IP packet (no ethernet header)
 * @cs: coding scheme / payload type reported by the device
 *
 * Only IPv4 payload types are supported: a fake ethernet header is
 * written in front of skb->data (headroom assumed reserved by the
 * caller — confirm in the ERX path) and the skb is handed to
 * netif_rx_ni(). Unsupported types are logged and skipped.
 */
void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
		    enum i2400m_cs cs)
{
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
		  i2400m, skb, skb->len, cs);
	switch(cs) {
	case I2400M_CS_IPV4_0:	/* fallthrough: both are plain IPv4 */
	case I2400M_CS_IPV4:
		i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
					  skb->data - ETH_HLEN,
					  cpu_to_be16(ETH_P_IP));
		skb_set_mac_header(skb, -ETH_HLEN);
		skb->dev = i2400m->wimax_dev.net_dev;
		skb->protocol = htons(ETH_P_IP);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += skb->len;
		break;
	default:
		dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
		/* NOTE(review): skb is not freed on this path — verify
		 * the caller retains ownership and drops it. */
		goto error;

	}
	d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
		 skb->len);
	d_dump(4, dev, skb->data, skb->len);
	netif_rx_ni(skb);
error:
	d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n",
		i2400m, skb, skb->len, cs);
}
551
/* Netdev operations for the i2400m network interface */
static const struct net_device_ops i2400m_netdev_ops = {
	.ndo_open = i2400m_open,
	.ndo_stop = i2400m_stop,
	.ndo_start_xmit = i2400m_hard_start_xmit,
	.ndo_tx_timeout = i2400m_tx_timeout,
};
558
559static void i2400m_get_drvinfo(struct net_device *net_dev,
560 struct ethtool_drvinfo *info)
561{
562 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
563
564 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
565 strlcpy(info->fw_version, i2400m->fw_name ? : "",
566 sizeof(info->fw_version));
567 if (net_dev->dev.parent)
568 strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
569 sizeof(info->bus_info));
570}
571
/* ethtool operations: driver info plus standard link detection */
static const struct ethtool_ops i2400m_ethtool_ops = {
	.get_drvinfo = i2400m_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
576
577
578
579
580
581
582void i2400m_netdev_setup(struct net_device *net_dev)
583{
584 d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
585 ether_setup(net_dev);
586 net_dev->mtu = I2400M_MAX_MTU;
587 net_dev->min_mtu = 0;
588 net_dev->max_mtu = I2400M_MAX_MTU;
589 net_dev->tx_queue_len = I2400M_TX_QLEN;
590 net_dev->features =
591 NETIF_F_VLAN_CHALLENGED
592 | NETIF_F_HIGHDMA;
593 net_dev->flags =
594 IFF_NOARP
595 & (~IFF_BROADCAST
596 & ~IFF_MULTICAST);
597 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
598 net_dev->netdev_ops = &i2400m_netdev_ops;
599 net_dev->ethtool_ops = &i2400m_ethtool_ops;
600 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
601}
602EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
603
604