1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#include <linux/module.h>
41#include <linux/types.h>
42#include <linux/kernel.h>
43#include <linux/string.h>
44#include <linux/mm.h>
45#include <linux/socket.h>
46#include <linux/in.h>
47#include <linux/inet.h>
48#include <linux/ip.h>
49#include <linux/netdevice.h>
50#include <linux/etherdevice.h>
51#include <linux/skbuff.h>
52#include <linux/errno.h>
53#include <linux/init.h>
54#include <linux/if_ether.h>
55#include <linux/of_net.h>
56#include <linux/pci.h>
57#include <net/dst.h>
58#include <net/arp.h>
59#include <net/sock.h>
60#include <net/ipv6.h>
61#include <net/ip.h>
62#include <net/dsa.h>
63#include <net/flow_dissector.h>
64#include <linux/uaccess.h>
65
/* Honor the legacy "ether=" kernel command-line option for boot-time
 * Ethernet device configuration (irq/io/name), parsed by netdev_boot_setup. */
__setup("ether=", netdev_boot_setup);
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81int eth_header(struct sk_buff *skb, struct net_device *dev,
82 unsigned short type,
83 const void *daddr, const void *saddr, unsigned int len)
84{
85 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
86
87 if (type != ETH_P_802_3 && type != ETH_P_802_2)
88 eth->h_proto = htons(type);
89 else
90 eth->h_proto = htons(len);
91
92
93
94
95
96 if (!saddr)
97 saddr = dev->dev_addr;
98 memcpy(eth->h_source, saddr, ETH_ALEN);
99
100 if (daddr) {
101 memcpy(eth->h_dest, daddr, ETH_ALEN);
102 return ETH_HLEN;
103 }
104
105
106
107
108
109 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
110 eth_zero_addr(eth->h_dest);
111 return ETH_HLEN;
112 }
113
114 return -ETH_HLEN;
115}
116EXPORT_SYMBOL(eth_header);
117
118
119
120
121
122
123
124
125
126u32 eth_get_headlen(void *data, unsigned int len)
127{
128 const unsigned int flags = FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
129 const struct ethhdr *eth = (const struct ethhdr *)data;
130 struct flow_keys keys;
131
132
133 if (unlikely(len < sizeof(*eth)))
134 return len;
135
136
137 if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto,
138 sizeof(*eth), len, flags))
139 return max_t(u32, keys.control.thoff, sizeof(*eth));
140
141
142 return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len);
143}
144EXPORT_SYMBOL(eth_get_headlen);
145
146
147
148
149
150
151
152
153
154
/**
 * eth_type_trans - determine the packet's protocol ID
 * @skb: received data buffer
 * @dev: receiving network device
 *
 * Classifies skb->pkt_type from the destination MAC and returns the
 * frame's protocol in network byte order. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length; this
 * is normal practice and works for any 'now in use' protocol.
 */
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short _service_access_point;
	const unsigned short *sap;
	const struct ethhdr *eth;

	skb->dev = dev;
	skb_reset_mac_header(skb);

	/* Capture the header before pulling it off the data area. */
	eth = (struct ethhdr *)skb->data;
	skb_pull_inline(skb, ETH_HLEN);

	/* Classify by destination MAC: broadcast, multicast, or a frame
	 * addressed to some other host. */
	if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
	else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
						   dev->dev_addr)))
		skb->pkt_type = PACKET_OTHERHOST;

	/* Some variants of DSA tagging don't have an ethertype field at
	 * all, so if the receiving interface uses DSA, hand the frame to
	 * the DSA layer without looking at the packet. */
	if (unlikely(netdev_uses_dsa(dev)))
		return htons(ETH_P_XDSA);

	/* A value >= 0x0600 is an Ethernet II protocol type. */
	if (likely(eth_proto_is_802_3(eth->h_proto)))
		return eth->h_proto;

	/* Spot raw-802.3 IPX frames: older Novell stacks run IPX over
	 * 802.3 without an 802.2 LLC layer; the IPX checksum 0xFFFF in
	 * the SSAP/DSAP position is never a valid LLC value. */
	sap = skb_header_pointer(skb, 0, sizeof(*sap), &_service_access_point);
	if (sap && *sap == 0xFFFF)
		return htons(ETH_P_802_3);

	/* Otherwise: a genuine 802.2 LLC frame. */
	return htons(ETH_P_802_2);
}
EXPORT_SYMBOL(eth_type_trans);
205
206
207
208
209
210
211int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
212{
213 const struct ethhdr *eth = eth_hdr(skb);
214 memcpy(haddr, eth->h_source, ETH_ALEN);
215 return ETH_ALEN;
216}
217EXPORT_SYMBOL(eth_header_parse);
218
219
220
221
222
223
224
225
226
227int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
228{
229 struct ethhdr *eth;
230 const struct net_device *dev = neigh->dev;
231
232 eth = (struct ethhdr *)
233 (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));
234
235 if (type == htons(ETH_P_802_3))
236 return -1;
237
238 eth->h_proto = type;
239 memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
240 memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
241 hh->hh_len = ETH_HLEN;
242 return 0;
243}
244EXPORT_SYMBOL(eth_header_cache);
245
246
247
248
249
250
251
252
253
254void eth_header_cache_update(struct hh_cache *hh,
255 const struct net_device *dev,
256 const unsigned char *haddr)
257{
258 memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
259 haddr, ETH_ALEN);
260}
261EXPORT_SYMBOL(eth_header_cache_update);
262
263
264
265
266
267
268int eth_prepare_mac_addr_change(struct net_device *dev, void *p)
269{
270 struct sockaddr *addr = p;
271
272 if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
273 return -EBUSY;
274 if (!is_valid_ether_addr(addr->sa_data))
275 return -EADDRNOTAVAIL;
276 return 0;
277}
278EXPORT_SYMBOL(eth_prepare_mac_addr_change);
279
280
281
282
283
284
285void eth_commit_mac_addr_change(struct net_device *dev, void *p)
286{
287 struct sockaddr *addr = p;
288
289 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
290}
291EXPORT_SYMBOL(eth_commit_mac_addr_change);
292
293
294
295
296
297
298
299
300
301
302
/**
 * eth_mac_addr - validate and set a device's Ethernet MAC address
 * @dev: device to update
 * @p:   pointer to a struct sockaddr carrying the new address
 *
 * Returns 0 on success or the negative errno from validation.
 */
int eth_mac_addr(struct net_device *dev, void *p)
{
	int err = eth_prepare_mac_addr_change(dev, p);

	if (err < 0)
		return err;
	eth_commit_mac_addr_change(dev, p);
	return 0;
}
EXPORT_SYMBOL(eth_mac_addr);
314
315
316
317
318
319
320
321
322
323int eth_change_mtu(struct net_device *dev, int new_mtu)
324{
325 if (new_mtu < 68 || new_mtu > ETH_DATA_LEN)
326 return -EINVAL;
327 dev->mtu = new_mtu;
328 return 0;
329}
330EXPORT_SYMBOL(eth_change_mtu);
331
332int eth_validate_addr(struct net_device *dev)
333{
334 if (!is_valid_ether_addr(dev->dev_addr))
335 return -EADDRNOTAVAIL;
336
337 return 0;
338}
339EXPORT_SYMBOL(eth_validate_addr);
340
/* Default header operations for Ethernet-like devices: build, parse and
 * cache link-layer headers using the eth_* helpers above. */
const struct header_ops eth_header_ops ____cacheline_aligned = {
	.create		= eth_header,
	.parse		= eth_header_parse,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};
347
348
349
350
351
352
353
354void ether_setup(struct net_device *dev)
355{
356 dev->header_ops = ð_header_ops;
357 dev->type = ARPHRD_ETHER;
358 dev->hard_header_len = ETH_HLEN;
359 dev->mtu = ETH_DATA_LEN;
360 dev->addr_len = ETH_ALEN;
361 dev->tx_queue_len = 1000;
362 dev->flags = IFF_BROADCAST|IFF_MULTICAST;
363 dev->priv_flags |= IFF_TX_SKB_SHARING;
364
365 eth_broadcast_addr(dev->broadcast);
366
367}
368EXPORT_SYMBOL(ether_setup);
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
/**
 * alloc_etherdev_mqs - allocate and set up an Ethernet device
 * @sizeof_priv: size of the driver-private area to allocate
 * @txqs:        number of TX queues to allocate
 * @rxqs:        number of RX queues to allocate
 *
 * Allocates a net_device named "eth%d" (the core picks the unit number)
 * and applies ether_setup() defaults. Returns NULL on allocation failure.
 */
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
				      unsigned int rxqs)
{
	return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_UNKNOWN,
				ether_setup, txqs, rxqs);
}
EXPORT_SYMBOL(alloc_etherdev_mqs);
392
/* Format @len address bytes as colon-separated hex (the "%*phC" printk
 * extension) plus a newline into @buf, bounded by PAGE_SIZE for sysfs. */
ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
{
	return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
}
EXPORT_SYMBOL(sysfs_format_mac);
398
/* GRO receive handler for Ethernet-encapsulated frames (ETH_P_TEB):
 * match held packets on their inner Ethernet header, then hand off to
 * the offload handler for the inner protocol type. Returns the list
 * position a merged skb should replace, or NULL. */
struct sk_buff **eth_gro_receive(struct sk_buff **head,
				 struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct ethhdr *eh, *eh2;
	unsigned int hlen, off_eth;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;	/* assume not mergeable until the header checks out */

	off_eth = skb_gro_offset(skb);
	hlen = off_eth + sizeof(*eh);
	eh = skb_gro_header_fast(skb, off_eth);
	/* Fast path reads from the linear area; fall back to the slow
	 * path when the header is not linearly accessible. */
	if (skb_gro_header_hard(skb, hlen)) {
		eh = skb_gro_header_slow(skb, hlen, off_eth);
		if (unlikely(!eh))
			goto out;
	}

	flush = 0;

	/* Any held packet whose inner Ethernet header differs cannot be
	 * the same flow as this one. */
	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		eh2 = (struct ethhdr *)(p->data + off_eth);
		if (compare_ether_header(eh, eh2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = eh->h_proto;

	/* Offload handler lookup is RCU-protected. */
	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype == NULL) {
		flush = 1;	/* no handler: flush rather than hold */
		goto out_unlock;
	}

	/* Consume the inner Ethernet header and recurse into the inner
	 * protocol's gro_receive. */
	skb_gro_pull(skb, sizeof(*eh));
	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
EXPORT_SYMBOL(eth_gro_receive);
452
453int eth_gro_complete(struct sk_buff *skb, int nhoff)
454{
455 struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
456 __be16 type = eh->h_proto;
457 struct packet_offload *ptype;
458 int err = -ENOSYS;
459
460 if (skb->encapsulation)
461 skb_set_inner_mac_header(skb, nhoff);
462
463 rcu_read_lock();
464 ptype = gro_find_complete_by_type(type);
465 if (ptype != NULL)
466 err = ptype->callbacks.gro_complete(skb, nhoff +
467 sizeof(struct ethhdr));
468
469 rcu_read_unlock();
470 return err;
471}
472EXPORT_SYMBOL(eth_gro_complete);
473
/* GRO offload registration for transparent Ethernet bridging (ETH_P_TEB),
 * i.e. Ethernet frames carried inside tunnels such as GRE. */
static struct packet_offload eth_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_TEB),
	.priority = 10,
	.callbacks = {
		.gro_receive = eth_gro_receive,
		.gro_complete = eth_gro_complete,
	},
};
482
/* Register the ETH_P_TEB GRO handlers at boot. */
static int __init eth_offload_init(void)
{
	dev_add_offload(&eth_packet_offload);

	return 0;
}

fs_initcall(eth_offload_init);
491
/* Weak default: architectures that can supply a platform MAC address
 * override this; returning NULL means "no platform-provided address". */
unsigned char * __weak arch_get_platform_mac_address(void)
{
	return NULL;
}
496
497int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
498{
499 const unsigned char *addr;
500 struct device_node *dp;
501
502 if (dev_is_pci(dev))
503 dp = pci_device_to_OF_node(to_pci_dev(dev));
504 else
505 dp = dev->of_node;
506
507 addr = NULL;
508 if (dp)
509 addr = of_get_mac_address(dp);
510 if (!addr)
511 addr = arch_get_platform_mac_address();
512
513 if (!addr)
514 return -ENODEV;
515
516 ether_addr_copy(mac_addr, addr);
517 return 0;
518}
519EXPORT_SYMBOL(eth_platform_get_mac_address);
520