1
2
3
4
5
6
7
8
9
10
11
12
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/module.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/slab.h>
20#include <linux/init.h>
21#include <linux/list.h>
22#include <linux/wait.h>
23#include <linux/kobject.h>
24#include "mostcore.h"
25#include "networking.h"
26
/* header sizes of the two framing variants (bytes) */
#define MEP_HDR_LEN 8
#define MDP_HDR_LEN 16
/* maximum MAMAC payload carried in one 1 KiB telegram */
#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)

/* value written into the Port Message header length field */
#define PMHL 5

/* Port Message Service field values */
#define PMS_TELID_UNSEGM_MAMAC 0x0A
#define PMS_FIFONO_MDP 0x01
#define PMS_FIFONO_MEP 0x04
#define PMS_MSGTYPE_DATA 0x04
#define PMS_DEF_PRIO 0
#define MEP_DEF_RETRY 15

/* bit positions and masks of the FIFO-number, retry and telegram-id fields */
#define PMS_FIFONO_MASK 0x07
#define PMS_FIFONO_SHIFT 3
#define PMS_RETRY_SHIFT 4
#define PMS_TELID_MASK 0x0F
#define PMS_TELID_SHIFT 4

/* high / low byte of a 16-bit value */
#define HB(value) ((u8)((u16)(value) >> 8))
#define LB(value) ((u8)(value))

/* extract the field named by bitset_name (uses its _SHIFT/_MASK pair) */
#define EXTRACT_BIT_SET(bitset_name, value) \
	(((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)

/* true if buf/len hold a MOST Ethernet Packet (MEP) telegram */
#define PMS_IS_MEP(buf, len) \
	((len) > MEP_HDR_LEN && \
	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)

/* true if buf/len hold an unsegmented MAMAC (MDP) telegram */
#define PMS_IS_MAMAC(buf, len) \
	((len) > MDP_HDR_LEN && \
	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MDP && \
	 EXTRACT_BIT_SET(PMS_TELID, (buf)[14]) == PMS_TELID_UNSEGM_MAMAC)
60
/* one direction (RX or TX) of the MOST channel pair used by a net device */
struct net_dev_channel {
	bool linked;	/* set once a mostcore channel is attached */
	int ch_id;	/* mostcore channel index */
};
65
/* per-MOST-interface state; one entry in the global net_devices list */
struct net_dev_context {
	struct most_interface *iface;	/* owning MOST interface */
	bool channels_opened;		/* both channels started (ndo_open ran) */
	bool is_mamac;			/* MAMAC/MDP framing vs. MEP framing */
	struct net_device *dev;		/* created once both channels are linked */
	struct net_dev_channel rx;
	struct net_dev_channel tx;
	struct completion mac_compl;	/* completed when a MAC is delivered */
	struct list_head list;		/* link in net_devices */
};
76
77static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
78static struct spinlock list_lock;
79static struct most_aim aim;
80
81static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
82{
83 u8 *buff = mbo->virt_address;
84 const u8 broadcast[] = { 0x03, 0xFF };
85 const u8 *dest_addr = skb->data + 4;
86 const u8 *eth_type = skb->data + 12;
87 unsigned int payload_len = skb->len - ETH_HLEN;
88 unsigned int mdp_len = payload_len + MDP_HDR_LEN;
89
90 if (mbo->buffer_length < mdp_len) {
91 pr_err("drop: too small buffer! (%d for %d)\n",
92 mbo->buffer_length, mdp_len);
93 return -EINVAL;
94 }
95
96 if (skb->len < ETH_HLEN) {
97 pr_err("drop: too small packet! (%d)\n", skb->len);
98 return -EINVAL;
99 }
100
101 if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
102 dest_addr = broadcast;
103
104 *buff++ = HB(mdp_len - 2);
105 *buff++ = LB(mdp_len - 2);
106
107 *buff++ = PMHL;
108 *buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
109 *buff++ = PMS_DEF_PRIO;
110 *buff++ = dest_addr[0];
111 *buff++ = dest_addr[1];
112 *buff++ = 0x00;
113
114 *buff++ = HB(payload_len + 6);
115 *buff++ = LB(payload_len + 6);
116
117
118
119 *buff++ = eth_type[0];
120 *buff++ = eth_type[1];
121 *buff++ = 0;
122 *buff++ = 0;
123
124 *buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
125 *buff++ = LB(payload_len);
126
127 memcpy(buff, skb->data + ETH_HLEN, payload_len);
128 mbo->buffer_length = mdp_len;
129 return 0;
130}
131
132static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
133{
134 u8 *buff = mbo->virt_address;
135 unsigned int mep_len = skb->len + MEP_HDR_LEN;
136
137 if (mbo->buffer_length < mep_len) {
138 pr_err("drop: too small buffer! (%d for %d)\n",
139 mbo->buffer_length, mep_len);
140 return -EINVAL;
141 }
142
143 *buff++ = HB(mep_len - 2);
144 *buff++ = LB(mep_len - 2);
145
146 *buff++ = PMHL;
147 *buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
148 *buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
149 *buff++ = 0;
150 *buff++ = 0;
151 *buff++ = 0;
152
153 memcpy(buff, skb->data, skb->len);
154 mbo->buffer_length = mep_len;
155 return 0;
156}
157
158static int most_nd_set_mac_address(struct net_device *dev, void *p)
159{
160 struct net_dev_context *nd = dev->ml_priv;
161 int err = eth_mac_addr(dev, p);
162
163 if (err)
164 return err;
165
166 BUG_ON(nd->dev != dev);
167
168 nd->is_mamac =
169 (dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
170 dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);
171
172
173
174
175
176 dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;
177
178 return 0;
179}
180
/*
 * most_nd_open - ndo_open: bring both MOST channels up for this device
 *
 * Starts the RX channel, then the TX channel.  If no valid MAC address
 * is known yet, the interface is asked for its netinfo and we wait (up
 * to 5 s) for most_deliver_netinfo() to complete nd->mac_compl.
 *
 * Returns 0 on success; -EFAULT if the channels are already open,
 * -EBUSY if a channel cannot be started or the MAC wait times out, or
 * the negative value from the interrupted wait.
 */
static int most_nd_open(struct net_device *dev)
{
	struct net_dev_context *nd = dev->ml_priv;
	long ret;

	netdev_info(dev, "open net device\n");

	BUG_ON(nd->dev != dev);

	if (nd->channels_opened)
		return -EFAULT;

	BUG_ON(!nd->tx.linked || !nd->rx.linked);

	if (most_start_channel(nd->iface, nd->rx.ch_id, &aim)) {
		netdev_err(dev, "most_start_channel() failed\n");
		return -EBUSY;
	}

	if (most_start_channel(nd->iface, nd->tx.ch_id, &aim)) {
		netdev_err(dev, "most_start_channel() failed\n");
		/* undo the RX start so we leave no half-open state behind */
		most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
		return -EBUSY;
	}

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* ask the HDM for the hardware address, then wait for it */
		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
		ret = wait_for_completion_interruptible_timeout(
			&nd->mac_compl, msecs_to_jiffies(5000));
		if (!ret) {
			/* zero return means the 5 s timeout elapsed */
			netdev_err(dev, "mac timeout\n");
			ret = -EBUSY;
			goto err;
		}

		if (ret < 0) {
			/* interrupted by a signal; propagate the error */
			netdev_warn(dev, "mac waiting interrupted\n");
			goto err;
		}
	}

	nd->channels_opened = true;
	netif_wake_queue(dev);
	return 0;

err:
	most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
	most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
	return ret;
}
231
232static int most_nd_stop(struct net_device *dev)
233{
234 struct net_dev_context *nd = dev->ml_priv;
235
236 netdev_info(dev, "stop net device\n");
237
238 BUG_ON(nd->dev != dev);
239 netif_stop_queue(dev);
240
241 if (nd->channels_opened) {
242 most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
243 most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
244 nd->channels_opened = false;
245 }
246
247 return 0;
248}
249
250static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
251 struct net_device *dev)
252{
253 struct net_dev_context *nd = dev->ml_priv;
254 struct mbo *mbo;
255 int ret;
256
257 BUG_ON(nd->dev != dev);
258
259 mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &aim);
260
261 if (!mbo) {
262 netif_stop_queue(dev);
263 dev->stats.tx_fifo_errors++;
264 return NETDEV_TX_BUSY;
265 }
266
267 if (nd->is_mamac)
268 ret = skb_to_mamac(skb, mbo);
269 else
270 ret = skb_to_mep(skb, mbo);
271
272 if (ret) {
273 most_put_mbo(mbo);
274 dev->stats.tx_dropped++;
275 kfree_skb(skb);
276 return NETDEV_TX_OK;
277 }
278
279 most_submit_mbo(mbo);
280 dev->stats.tx_packets++;
281 dev->stats.tx_bytes += skb->len;
282 kfree_skb(skb);
283 return NETDEV_TX_OK;
284}
285
/* net_device callbacks implemented by this driver */
static const struct net_device_ops most_nd_ops = {
	.ndo_open = most_nd_open,
	.ndo_stop = most_nd_stop,
	.ndo_start_xmit = most_nd_start_xmit,
	.ndo_set_mac_address = most_nd_set_mac_address,
};
292
/* alloc_netdev() setup callback: Ethernet defaults plus our ops table */
static void most_nd_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &most_nd_ops;
}
298
299static void most_net_rm_netdev_safe(struct net_dev_context *nd)
300{
301 if (!nd->dev)
302 return;
303
304 pr_info("remove net device %p\n", nd->dev);
305
306 unregister_netdev(nd->dev);
307 free_netdev(nd->dev);
308 nd->dev = NULL;
309}
310
311static struct net_dev_context *get_net_dev_context(
312 struct most_interface *iface)
313{
314 struct net_dev_context *nd, *tmp;
315 unsigned long flags;
316
317 spin_lock_irqsave(&list_lock, flags);
318 list_for_each_entry_safe(nd, tmp, &net_devices, list) {
319 if (nd->iface == iface) {
320 spin_unlock_irqrestore(&list_lock, flags);
321 return nd;
322 }
323 }
324 spin_unlock_irqrestore(&list_lock, flags);
325 return NULL;
326}
327
328static int aim_probe_channel(struct most_interface *iface, int channel_idx,
329 struct most_channel_config *ccfg,
330 struct kobject *parent, char *name)
331{
332 struct net_dev_context *nd;
333 struct net_dev_channel *ch;
334 unsigned long flags;
335
336 if (!iface)
337 return -EINVAL;
338
339 if (ccfg->data_type != MOST_CH_ASYNC)
340 return -EINVAL;
341
342 nd = get_net_dev_context(iface);
343
344 if (!nd) {
345 nd = kzalloc(sizeof(*nd), GFP_KERNEL);
346 if (!nd)
347 return -ENOMEM;
348
349 init_completion(&nd->mac_compl);
350 nd->iface = iface;
351
352 spin_lock_irqsave(&list_lock, flags);
353 list_add(&nd->list, &net_devices);
354 spin_unlock_irqrestore(&list_lock, flags);
355 }
356
357 ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
358 if (ch->linked) {
359 pr_err("only one channel per instance & direction allowed\n");
360 return -EINVAL;
361 }
362
363 if (nd->tx.linked || nd->rx.linked) {
364 struct net_device *dev =
365 alloc_netdev(0, "meth%d", NET_NAME_UNKNOWN,
366 most_nd_setup);
367
368 if (!dev) {
369 pr_err("no memory for net_device\n");
370 return -ENOMEM;
371 }
372
373 nd->dev = dev;
374 ch->ch_id = channel_idx;
375 ch->linked = true;
376
377 dev->ml_priv = nd;
378 if (register_netdev(dev)) {
379 pr_err("registering net device failed\n");
380 ch->linked = false;
381 free_netdev(dev);
382 return -EINVAL;
383 }
384 }
385
386 ch->ch_id = channel_idx;
387 ch->linked = true;
388
389 return 0;
390}
391
/*
 * aim_disconnect_channel - mostcore disconnect: unbind one channel
 *
 * Marks the matching channel direction unlinked, removes the net device
 * (it needs both directions to operate) and frees the context once both
 * directions are gone.
 *
 * Returns 0 on success, -EINVAL if the channel is unknown.
 */
static int aim_disconnect_channel(struct most_interface *iface,
				  int channel_idx)
{
	struct net_dev_context *nd;
	struct net_dev_channel *ch;
	unsigned long flags;

	nd = get_net_dev_context(iface);
	if (!nd)
		return -EINVAL;

	if (nd->rx.linked && channel_idx == nd->rx.ch_id)
		ch = &nd->rx;
	else if (nd->tx.linked && channel_idx == nd->tx.ch_id)
		ch = &nd->tx;
	else
		return -EINVAL;

	ch->linked = false;

	/*
	 * The net device cannot work with only one channel direction left,
	 * so tear it down as soon as either channel disconnects.
	 */
	most_net_rm_netdev_safe(nd);

	if (!nd->rx.linked && !nd->tx.linked) {
		/* last direction gone: drop the context itself */
		spin_lock_irqsave(&list_lock, flags);
		list_del(&nd->list);
		spin_unlock_irqrestore(&list_lock, flags);
		kfree(nd);
	}

	return 0;
}
427
428static int aim_resume_tx_channel(struct most_interface *iface,
429 int channel_idx)
430{
431 struct net_dev_context *nd;
432
433 nd = get_net_dev_context(iface);
434 if (!nd || !nd->channels_opened || nd->tx.ch_id != channel_idx)
435 return 0;
436
437 if (!nd->dev)
438 return 0;
439
440 netif_wake_queue(nd->dev);
441 return 0;
442}
443
/*
 * aim_rx_data - rx_completion hook: turn a received MBO into an skb
 * @mbo: buffer delivered by the hardware-dependent module
 *
 * Validates the Port Message framing (MEP, or unsegmented MAMAC when in
 * MAMAC mode), rebuilds an Ethernet header for MAMAC telegrams, and
 * hands the frame to the network stack via netif_rx().
 *
 * Returns 0 once the MBO has been consumed (and put); returns -EIO
 * without putting the MBO on validation failure — presumably the core
 * then re-queues or releases the buffer itself (verify against
 * mostcore's completion handling).
 */
static int aim_rx_data(struct mbo *mbo)
{
	const u32 zero = 0;
	struct net_dev_context *nd;
	char *buf = mbo->virt_address;
	u32 len = mbo->processed_length;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned int skb_len;

	nd = get_net_dev_context(mbo->ifp);
	if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
		return -EIO;

	dev = nd->dev;
	if (!dev) {
		pr_err_once("drop packet: missing net_device\n");
		return -EIO;
	}

	if (nd->is_mamac) {
		if (!PMS_IS_MAMAC(buf, len))
			return -EIO;

		/* payload plus room for a synthesized 14-byte Ethernet header */
		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
	} else {
		if (!PMS_IS_MEP(buf, len))
			return -EIO;

		/* MEP telegrams already carry a complete Ethernet frame */
		skb = dev_alloc_skb(len - MEP_HDR_LEN);
	}

	if (!skb) {
		dev->stats.rx_dropped++;
		pr_err_once("drop packet: no memory for skb\n");
		goto out;
	}

	skb->dev = dev;

	if (nd->is_mamac) {
		/* destination MAC: our own address */
		ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);

		/* source MAC: four zero bytes + the 16-bit address at header
		 * offset 5 (assumed to be the MAMAC source — TODO confirm) */
		memcpy(skb_put(skb, 4), &zero, 4);
		memcpy(skb_put(skb, 2), buf + 5, 2);

		/* EtherType taken from the MDP header */
		memcpy(skb_put(skb, 2), buf + 10, 2);

		buf += MDP_HDR_LEN;
		len -= MDP_HDR_LEN;
	} else {
		buf += MEP_HDR_LEN;
		len -= MEP_HDR_LEN;
	}

	memcpy(skb_put(skb, len), buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	/* cache the length: netif_rx() may free the skb */
	skb_len = skb->len;
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb_len;
	} else {
		dev->stats.rx_dropped++;
	}

out:
	most_put_mbo(mbo);
	return 0;
}
516
/* AIM callbacks registered with the mostcore */
static struct most_aim aim = {
	.name = "networking",
	.probe_channel = aim_probe_channel,
	.disconnect_channel = aim_disconnect_channel,
	.tx_completion = aim_resume_tx_channel,
	.rx_completion = aim_rx_data,
};
524
/* module init: set up the context-list lock and register with mostcore */
static int __init most_net_init(void)
{
	pr_info("most_net_init()\n");
	spin_lock_init(&list_lock);
	return most_register_aim(&aim);
}
531
/*
 * most_net_exit - module teardown: release every remaining context
 *
 * The spinlock is dropped around the per-entry cleanup because
 * unregister_netdev() (via most_net_rm_netdev_safe) can sleep; the
 * entry is unhooked with list_del() under the lock first, so it cannot
 * be found again in the meantime.
 */
static void __exit most_net_exit(void)
{
	struct net_dev_context *nd, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_for_each_entry_safe(nd, tmp, &net_devices, list) {
		list_del(&nd->list);
		spin_unlock_irqrestore(&list_lock, flags);

		/*
		 * remove the net device outside the spinlock: unregistering
		 * may sleep
		 */
		most_net_rm_netdev_safe(nd);
		kfree(nd);
		spin_lock_irqsave(&list_lock, flags);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	most_deregister_aim(&aim);
	pr_info("most_net_exit()\n");
}
554
555
556
557
558
559
560
561void most_deliver_netinfo(struct most_interface *iface,
562 unsigned char link_stat, unsigned char *mac_addr)
563{
564 struct net_dev_context *nd;
565 struct net_device *dev;
566 const u8 *m = mac_addr;
567
568 nd = get_net_dev_context(iface);
569 if (!nd)
570 return;
571
572 dev = nd->dev;
573 if (!dev)
574 return;
575
576 if (m && is_valid_ether_addr(m)) {
577 if (!is_valid_ether_addr(dev->dev_addr)) {
578 netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
579 m[0], m[1], m[2], m[3], m[4], m[5]);
580 ether_addr_copy(dev->dev_addr, m);
581 complete(&nd->mac_compl);
582 } else if (!ether_addr_equal(dev->dev_addr, m)) {
583 netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
584 m[0], m[1], m[2], m[3], m[4], m[5]);
585 }
586 }
587}
588EXPORT_SYMBOL(most_deliver_netinfo);
589
590module_init(most_net_init);
591module_exit(most_net_exit);
592MODULE_LICENSE("GPL");
593MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
594MODULE_DESCRIPTION("Networking Application Interface Module for MostCore");
595