/*
 * Ethernet-type device handling.
 *
 * Generic helpers shared by Ethernet drivers and the core networking stack:
 * Ethernet header construction and parsing, hardware (MAC) address
 * validation and retrieval, default device setup, and GRO callbacks for
 * Transparent Ethernet Bridging (ETH_P_TEB) frames.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/property.h>
#include <net/dst.h>
#include <net/arp.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/dsa.h>
#include <net/flow_dissector.h>
#include <net/gro.h>
#include <linux/uaccess.h>
#include <net/pkt_sched.h>

/**
 * eth_header - create the Ethernet header
 * @skb: buffer to alter
 * @dev: source device
 * @type: Ethernet type field
 * @daddr: destination address (NULL leaves the destination address untouched)
 * @saddr: source address (NULL uses the device source address)
 * @len: packet length (<= skb->len)
 *
 * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
 * length in here instead.
 */
int eth_header(struct sk_buff *skb, struct net_device *dev,
               unsigned short type,
               const void *daddr, const void *saddr, unsigned int len)
{
        struct ethhdr *eth = skb_push(skb, ETH_HLEN);

        if (type != ETH_P_802_3 && type != ETH_P_802_2)
                eth->h_proto = htons(type);
        else
                eth->h_proto = htons(len);

        /*
         * Set the source hardware address.
         */

        if (!saddr)
                saddr = dev->dev_addr;
        memcpy(eth->h_source, saddr, ETH_ALEN);

        if (daddr) {
                memcpy(eth->h_dest, daddr, ETH_ALEN);
                return ETH_HLEN;
        }

        /*
         * Anyway, the loopback-device should never use this function...
         */

        if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
                eth_zero_addr(eth->h_dest);
                return ETH_HLEN;
        }

        return -ETH_HLEN;
}
EXPORT_SYMBOL(eth_header);
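
/*
 * Example (illustrative sketch): callers normally do not invoke eth_header()
 * directly; it is reached through dev_hard_header(), which dispatches to
 * dev->header_ops->create. A hypothetical sender that already knows the
 * destination MAC address could build and queue a frame like this:
 *
 *	static int example_xmit(struct sk_buff *skb, struct net_device *dev,
 *				const u8 *dest_mac)
 *	{
 *		if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL,
 *				    skb->len) < 0)
 *			return -EINVAL;
 *		return dev_queue_xmit(skb);
 *	}
 *
 * The skb is assumed to have ETH_HLEN bytes of headroom reserved.
 */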

/**
 * eth_get_headlen - determine the length of header for an ethernet frame
 * @dev: pointer to network device
 * @data: pointer to start of frame
 * @len: total length of frame
 *
 * Make a best effort attempt to pull the length for all of the headers for
 * a given frame in a linear buffer.
 */
u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len)
{
        const unsigned int flags = FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
        const struct ethhdr *eth = (const struct ethhdr *)data;
        struct flow_keys_basic keys;

        /* this should never happen, but better safe than sorry */
        if (unlikely(len < sizeof(*eth)))
                return len;

        /* parse any remaining L2/L3 headers, check for L4 */
        if (!skb_flow_dissect_flow_keys_basic(dev_net(dev), NULL, &keys, data,
                                              eth->h_proto, sizeof(*eth),
                                              len, flags))
                return max_t(u32, keys.control.thoff, sizeof(*eth));

        /* parse for any L4 headers */
        return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len);
}
EXPORT_SYMBOL(eth_get_headlen);
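
/*
 * Example (illustrative sketch, hypothetical names): drivers that build skbs
 * from page fragments often copy only the packet headers into the linear
 * area and attach the rest as a fragment, sizing the copy with
 * eth_get_headlen():
 *
 *	unsigned int pull = eth_get_headlen(netdev, va,
 *					    min_t(u32, size, RX_HDR_LEN));
 *
 *	skb_put_data(skb, va, pull);
 *	skb_add_rx_frag(skb, 0, page, offset + pull, size - pull, truesize);
 *
 * Here va, size, page, offset, truesize and RX_HDR_LEN stand in for the
 * driver's own receive-buffer bookkeeping.
 */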

/**
 * eth_type_trans - determine the packet's protocol ID
 * @skb: received socket data
 * @dev: receiving network device
 *
 * The rule here is that we assume 802.3 if the type field is short enough
 * to be a length. This is normal practice and works for any 'now in use'
 * protocol.
 */
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        unsigned short _service_access_point;
        const unsigned short *sap;
        const struct ethhdr *eth;

        skb->dev = dev;
        skb_reset_mac_header(skb);

        eth = (struct ethhdr *)skb->data;
        skb_pull_inline(skb, ETH_HLEN);

        if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
                                              dev->dev_addr))) {
                if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
                        if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
                                skb->pkt_type = PACKET_BROADCAST;
                        else
                                skb->pkt_type = PACKET_MULTICAST;
                } else {
                        skb->pkt_type = PACKET_OTHERHOST;
                }
        }

        /* Some variants of DSA tagging don't have an ethertype field
         * at all, so we check here whether one of those tagging
         * variants has been configured on the receiving interface,
         * and if so, set skb->protocol without looking at the packet.
         */
        if (unlikely(netdev_uses_dsa(dev)))
                return htons(ETH_P_XDSA);

        if (likely(eth_proto_is_802_3(eth->h_proto)))
                return eth->h_proto;

        /*
         * This is a magic hack to spot IPX packets. Older Novell breaks
         * the protocol design and runs IPX over 802.3 without an 802.2 LLC
         * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
         * won't work for fault tolerant netware but does for the rest.
         */
        sap = skb_header_pointer(skb, 0, sizeof(*sap), &_service_access_point);
        if (sap && *sap == 0xFFFF)
                return htons(ETH_P_802_3);

        /*
         * Real 802.2 LLC
         */
        return htons(ETH_P_802_2);
}
EXPORT_SYMBOL(eth_type_trans);
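
/*
 * Example (illustrative sketch): a typical driver receive path sets
 * skb->protocol with eth_type_trans() once the Ethernet header is in place,
 * then hands the packet to the stack:
 *
 *	skb_put(skb, frame_len);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(&priv->napi, skb);
 *
 * frame_len, netdev and priv stand in for the driver's own variables.
 */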

/**
 * eth_header_parse - extract hardware address from packet
 * @skb: packet to extract header from
 * @haddr: destination buffer
 */
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
        const struct ethhdr *eth = eth_hdr(skb);
        memcpy(haddr, eth->h_source, ETH_ALEN);
        return ETH_ALEN;
}
EXPORT_SYMBOL(eth_header_parse);

/**
 * eth_header_cache - fill cache entry from neighbour
 * @neigh: source neighbour
 * @hh: destination cache entry
 * @type: Ethernet type field
 *
 * Create an Ethernet header template from the neighbour.
 */
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
{
        struct ethhdr *eth;
        const struct net_device *dev = neigh->dev;

        eth = (struct ethhdr *)
            (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));

        if (type == htons(ETH_P_802_3))
                return -1;

        eth->h_proto = type;
        memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
        memcpy(eth->h_dest, neigh->ha, ETH_ALEN);

        /* Publish the header template only after it is fully written;
         * readers of hh->hh_len rely on this ordering.
         */
        smp_store_release(&hh->hh_len, ETH_HLEN);

        return 0;
}
EXPORT_SYMBOL(eth_header_cache);

/**
 * eth_header_cache_update - update cache entry
 * @hh: destination cache entry
 * @dev: network device
 * @haddr: new hardware address
 *
 * Called by Address Resolution module to notify changes in address.
 */
void eth_header_cache_update(struct hh_cache *hh,
                             const struct net_device *dev,
                             const unsigned char *haddr)
{
        memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
               haddr, ETH_ALEN);
}
EXPORT_SYMBOL(eth_header_cache_update);

/**
 * eth_header_parse_protocol - extract the protocol from the L2 header
 * @skb: packet to extract protocol from
 */
__be16 eth_header_parse_protocol(const struct sk_buff *skb)
{
        const struct ethhdr *eth = eth_hdr(skb);

        return eth->h_proto;
}
EXPORT_SYMBOL(eth_header_parse_protocol);

/**
 * eth_prepare_mac_addr_change - prepare for a MAC address change
 * @dev: network device
 * @p: socket address
 */
int eth_prepare_mac_addr_change(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;

        if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
                return -EBUSY;
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        return 0;
}
EXPORT_SYMBOL(eth_prepare_mac_addr_change);

/**
 * eth_commit_mac_addr_change - commit a MAC address change
 * @dev: network device
 * @p: socket address
 */
void eth_commit_mac_addr_change(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;

        eth_hw_addr_set(dev, addr->sa_data);
}
EXPORT_SYMBOL(eth_commit_mac_addr_change);
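
/*
 * Example (illustrative sketch, hypothetical names): drivers whose hardware
 * must be reprogrammed on a MAC address change validate the request first,
 * update their filters, then commit the new address to dev->dev_addr:
 *
 *	static int example_set_mac_address(struct net_device *dev, void *p)
 *	{
 *		struct sockaddr *addr = p;
 *		int err;
 *
 *		err = eth_prepare_mac_addr_change(dev, p);
 *		if (err)
 *			return err;
 *
 *		example_hw_write_mac_filter(dev, addr->sa_data);
 *		eth_commit_mac_addr_change(dev, p);
 *		return 0;
 *	}
 */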

/**
 * eth_mac_addr - set new Ethernet hardware address
 * @dev: network device
 * @p: socket address
 *
 * Change the hardware address of the device.
 *
 * This doesn't change hardware matching, so it needs to be overridden
 * for most real devices.
 */
int eth_mac_addr(struct net_device *dev, void *p)
{
        int ret;

        ret = eth_prepare_mac_addr_change(dev, p);
        if (ret < 0)
                return ret;
        eth_commit_mac_addr_change(dev, p);
        return 0;
}
EXPORT_SYMBOL(eth_mac_addr);

int eth_validate_addr(struct net_device *dev)
{
        if (!is_valid_ether_addr(dev->dev_addr))
                return -EADDRNOTAVAIL;

        return 0;
}
EXPORT_SYMBOL(eth_validate_addr);
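
/*
 * Example (illustrative sketch, hypothetical names): devices that need no
 * hardware reprogramming can plug these helpers straight into their
 * net_device_ops:
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_open		= example_open,
 *		.ndo_stop		= example_stop,
 *		.ndo_start_xmit		= example_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 */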

const struct header_ops eth_header_ops ____cacheline_aligned = {
        .create = eth_header,
        .parse = eth_header_parse,
        .cache = eth_header_cache,
        .cache_update = eth_header_cache_update,
        .parse_protocol = eth_header_parse_protocol,
};

/**
 * ether_setup - setup an Ethernet network device
 * @dev: network device
 *
 * Fill in the fields of the device structure with Ethernet-generic values.
 */
void ether_setup(struct net_device *dev)
{
        dev->header_ops = &eth_header_ops;
        dev->type = ARPHRD_ETHER;
        dev->hard_header_len = ETH_HLEN;
        dev->min_header_len = ETH_HLEN;
        dev->mtu = ETH_DATA_LEN;
        dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = ETH_DATA_LEN;
        dev->addr_len = ETH_ALEN;
        dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
        dev->flags = IFF_BROADCAST|IFF_MULTICAST;
        dev->priv_flags |= IFF_TX_SKB_SHARING;

        eth_broadcast_addr(dev->broadcast);
}
EXPORT_SYMBOL(ether_setup);

/**
 * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this Ethernet device
 * @txqs: The number of TX queues this device has.
 * @rxqs: The number of RX queues this device has.
 *
 * Fill in the fields of the device structure with Ethernet-generic
 * values. Basically does everything except registering the device.
 *
 * Constructs a new net device, complete with a private data area of
 * size (sizeof_priv).
 */
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
                                      unsigned int rxqs)
{
        return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_UNKNOWN,
                                ether_setup, txqs, rxqs);
}
EXPORT_SYMBOL(alloc_etherdev_mqs);
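
/*
 * Example (illustrative sketch, hypothetical private struct): most drivers
 * allocate through the alloc_etherdev()/alloc_etherdev_mq() macros, which
 * expand to alloc_etherdev_mqs() and therefore run ether_setup():
 *
 *	struct net_device *netdev;
 *	int err;
 *
 *	netdev = alloc_etherdev(sizeof(struct example_priv));
 *	if (!netdev)
 *		return -ENOMEM;
 *
 *	// set netdev_ops, features, MAC address, ...
 *	err = register_netdev(netdev);
 *	if (err)
 *		free_netdev(netdev);
 */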

ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
{
        return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
}
EXPORT_SYMBOL(sysfs_format_mac);
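
/*
 * Example (illustrative sketch): a sysfs "show" callback can format a MAC
 * address for user space in the canonical colon-separated form:
 *
 *	static ssize_t address_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		struct net_device *ndev = to_net_dev(dev);
 *
 *		return sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
 *	}
 *
 * The attribute name is illustrative, and real callers also take the
 * appropriate locking before dereferencing the address.
 */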

struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        const struct packet_offload *ptype;
        unsigned int hlen, off_eth;
        struct sk_buff *pp = NULL;
        struct ethhdr *eh, *eh2;
        struct sk_buff *p;
        __be16 type;
        int flush = 1;

        off_eth = skb_gro_offset(skb);
        hlen = off_eth + sizeof(*eh);
        eh = skb_gro_header_fast(skb, off_eth);
        if (skb_gro_header_hard(skb, hlen)) {
                eh = skb_gro_header_slow(skb, hlen, off_eth);
                if (unlikely(!eh))
                        goto out;
        }

        flush = 0;

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                eh2 = (struct ethhdr *)(p->data + off_eth);
                if (compare_ether_header(eh, eh2)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        type = eh->h_proto;

        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
        if (ptype == NULL) {
                flush = 1;
                goto out_unlock;
        }

        skb_gro_pull(skb, sizeof(*eh));
        skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));

        pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
                                            ipv6_gro_receive, inet_gro_receive,
                                            head, skb);

out_unlock:
        rcu_read_unlock();
out:
        skb_gro_flush_final(skb, pp, flush);

        return pp;
}
EXPORT_SYMBOL(eth_gro_receive);

int eth_gro_complete(struct sk_buff *skb, int nhoff)
{
        struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
        __be16 type = eh->h_proto;
        struct packet_offload *ptype;
        int err = -ENOSYS;

        if (skb->encapsulation)
                skb_set_inner_mac_header(skb, nhoff);

        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
        if (ptype != NULL)
                err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
                                         ipv6_gro_complete, inet_gro_complete,
                                         skb, nhoff + sizeof(*eh));

        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(eth_gro_complete);

static struct packet_offload eth_packet_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_TEB),
        .priority = 10,
        .callbacks = {
                .gro_receive = eth_gro_receive,
                .gro_complete = eth_gro_complete,
        },
};

static int __init eth_offload_init(void)
{
        dev_add_offload(&eth_packet_offload);

        return 0;
}

fs_initcall(eth_offload_init);

unsigned char * __weak arch_get_platform_mac_address(void)
{
        return NULL;
}

int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
{
        unsigned char *addr;
        int ret;

        ret = of_get_mac_address(dev->of_node, mac_addr);
        if (!ret)
                return 0;

        addr = arch_get_platform_mac_address();
        if (!addr)
                return -ENODEV;

        ether_addr_copy(mac_addr, addr);

        return 0;
}
EXPORT_SYMBOL(eth_platform_get_mac_address);

/**
 * platform_get_ethdev_address - Set netdev's MAC address from a given device
 * @dev: Pointer to the device
 * @netdev: Pointer to netdev to write the address to
 *
 * Wrapper around eth_platform_get_mac_address() which writes the address
 * directly to netdev->dev_addr.
 */
int platform_get_ethdev_address(struct device *dev, struct net_device *netdev)
{
        u8 addr[ETH_ALEN] __aligned(2);
        int ret;

        ret = eth_platform_get_mac_address(dev, addr);
        if (!ret)
                eth_hw_addr_set(netdev, addr);
        return ret;
}
EXPORT_SYMBOL(platform_get_ethdev_address);
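
/*
 * Example (illustrative sketch): a platform driver probe can try the
 * firmware-provided address first and fall back to a random, locally
 * administered one:
 *
 *	if (platform_get_ethdev_address(&pdev->dev, netdev))
 *		eth_hw_addr_random(netdev);
 *
 * pdev and netdev are the driver's platform device and net_device.
 */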

/**
 * nvmem_get_mac_address - Obtain the MAC address from an nvmem cell named
 * 'mac-address' associated with the given device.
 * @dev: Device with which the mac-address cell is associated.
 * @addrbuf: Buffer to which the MAC address will be copied on success.
 *
 * Returns 0 on success or a negative error number on failure.
 */
int nvmem_get_mac_address(struct device *dev, void *addrbuf)
{
        struct nvmem_cell *cell;
        const void *mac;
        size_t len;

        cell = nvmem_cell_get(dev, "mac-address");
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        mac = nvmem_cell_read(cell, &len);
        nvmem_cell_put(cell);

        if (IS_ERR(mac))
                return PTR_ERR(mac);

        if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
                kfree(mac);
                return -EINVAL;
        }

        ether_addr_copy(addrbuf, mac);
        kfree(mac);

        return 0;
}
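
/*
 * Example (illustrative sketch): a driver whose firmware node carries an
 * nvmem cell named "mac-address" can read it into a local buffer and apply
 * it to the netdev:
 *
 *	u8 addr[ETH_ALEN];
 *
 *	if (!nvmem_get_mac_address(&pdev->dev, addr))
 *		eth_hw_addr_set(netdev, addr);
 *
 * pdev and netdev are the driver's own platform device and net_device.
 */
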
static int fwnode_get_mac_addr(struct fwnode_handle *fwnode,
                               const char *name, char *addr)
{
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, name, addr, ETH_ALEN);
        if (ret)
                return ret;

        if (!is_valid_ether_addr(addr))
                return -EINVAL;
        return 0;
}

/**
 * fwnode_get_mac_address - Get the MAC from the firmware node
 * @fwnode: Pointer to the firmware node
 * @addr: Address of buffer to store the MAC in
 *
 * Search the firmware node for the best MAC address to use: the
 * 'mac-address' property is checked first, then 'local-mac-address', then
 * the obsolete 'address' property. Invalid addresses (for example the
 * all-zero placeholders that firmware sometimes leaves unfilled) are
 * rejected.
 */
int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr)
{
        if (!fwnode_get_mac_addr(fwnode, "mac-address", addr) ||
            !fwnode_get_mac_addr(fwnode, "local-mac-address", addr) ||
            !fwnode_get_mac_addr(fwnode, "address", addr))
                return 0;

        return -ENOENT;
}
EXPORT_SYMBOL(fwnode_get_mac_address);

/**
 * device_get_mac_address - Get the MAC for a given device
 * @dev: Pointer to the device
 * @addr: Address of buffer to store the MAC in
 */
int device_get_mac_address(struct device *dev, char *addr)
{
        return fwnode_get_mac_address(dev_fwnode(dev), addr);
}
EXPORT_SYMBOL(device_get_mac_address);

/**
 * device_get_ethdev_address - Set netdev's MAC address from a given device
 * @dev: Pointer to the device
 * @netdev: Pointer to netdev to write the address to
 *
 * Wrapper around device_get_mac_address() which writes the address
 * directly to netdev->dev_addr.
 */
int device_get_ethdev_address(struct device *dev, struct net_device *netdev)
{
        u8 addr[ETH_ALEN];
        int ret;

        ret = device_get_mac_address(dev, addr);
        if (!ret)
                eth_hw_addr_set(netdev, addr);
        return ret;
}
EXPORT_SYMBOL(device_get_ethdev_address);
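
/*
 * Example (illustrative sketch): device_get_ethdev_address() resolves the
 * address through the device's fwnode, so it covers both DT- and
 * ACPI-described devices; a probe routine can use it as its single firmware
 * lookup before falling back to a random address:
 *
 *	if (device_get_ethdev_address(&pdev->dev, netdev))
 *		eth_hw_addr_random(netdev);
 */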