1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#ifndef _LINUX_ETHERDEVICE_H
18#define _LINUX_ETHERDEVICE_H
19
20#include <linux/if_ether.h>
21#include <linux/netdevice.h>
22#include <linux/random.h>
23#include <linux/crc32.h>
24#include <asm/unaligned.h>
25#include <asm/bitsperlong.h>
26
27#ifdef __KERNEL__
28struct device;
29struct fwnode_handle;
30
31int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
32int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
33unsigned char *arch_get_platform_mac_address(void);
34int nvmem_get_mac_address(struct device *dev, void *addrbuf);
35int device_get_mac_address(struct device *dev, char *addr);
36int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
37int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
38
39u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
40__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
41extern const struct header_ops eth_header_ops;
42
43int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
44 const void *daddr, const void *saddr, unsigned len);
45int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
46int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
47 __be16 type);
48void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
49 const unsigned char *haddr);
50__be16 eth_header_parse_protocol(const struct sk_buff *skb);
51int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
52void eth_commit_mac_addr_change(struct net_device *dev, void *p);
53int eth_mac_addr(struct net_device *dev, void *p);
54int eth_validate_addr(struct net_device *dev);
55
56struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
57 unsigned int rxqs);
58#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
59#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
60
61struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
62 unsigned int txqs,
63 unsigned int rxqs);
64#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
65
66struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
67int eth_gro_complete(struct sk_buff *skb, int nhoff);
68
69
70static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
71{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
72#define eth_stp_addr eth_reserved_addr_base
73
74
75
76
77
78
79
80
81
82
/**
 * is_link_local_ether_addr - Determine if given Ethernet address is link-local
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
 * IEEE 802.1Q 8.6.3 Frame filtering.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_link_local_ether_addr(const u8 *addr)
{
	__be16 *a = (__be16 *)addr;
	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
	static const __be16 m = cpu_to_be16(0xfff0);	/* mask off low nibble of last octet */

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* one 32-bit compare for the first four octets, masked 16-bit for the rest */
	return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
		(__force int)((a[2] ^ b[2]) & m)) == 0;
#else
	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
}
96
97
98
99
100
101
102
103
104
/**
 * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is all zeroes.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_zero_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
#else
	return (*(const u16 *)(addr + 0) |
		*(const u16 *)(addr + 2) |
		*(const u16 *)(addr + 4)) == 0;
#endif
}
115
116
117
118
119
120
121
122
/**
 * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is a multicast address.
 * By definition the broadcast address is also a multicast address.
 */
static inline bool is_multicast_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	u32 a = *(const u32 *)addr;
#else
	u16 a = *(const u16 *)addr;
#endif
#ifdef __BIG_ENDIAN
	/* multicast bit is the LSB of the first octet, i.e. the top byte here */
	return 0x01 & (a >> ((sizeof(a) * 8) - 8));
#else
	return 0x01 & a;
#endif
}
136
static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	/*
	 * NOTE(review): this reads 8 bytes, i.e. two bytes past ETH_ALEN —
	 * callers must guarantee the two pad bytes are readable (same
	 * contract as ether_addr_equal_64bits()).
	 */
#ifdef __BIG_ENDIAN
	return 0x01 & ((*(const u64 *)addr) >> 56);
#else
	return 0x01 & (*(const u64 *)addr);
#endif
#else
	return is_multicast_ether_addr(addr);
#endif
}
149
150
151
152
153
154
155
156static inline bool is_local_ether_addr(const u8 *addr)
157{
158 return 0x02 & addr[0];
159}
160
161
162
163
164
165
166
167
168
169static inline bool is_broadcast_ether_addr(const u8 *addr)
170{
171 return (*(const u16 *)(addr + 0) &
172 *(const u16 *)(addr + 2) &
173 *(const u16 *)(addr + 4)) == 0xffff;
174}
175
176
177
178
179
180
181
182static inline bool is_unicast_ether_addr(const u8 *addr)
183{
184 return !is_multicast_ether_addr(addr);
185}
186
187
188
189
190
191
192
193
194
195
196
197
198static inline bool is_valid_ether_addr(const u8 *addr)
199{
200
201
202 return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
203}
204
205
206
207
208
209
210
211
212
/**
 * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
 * @proto: Ethertype/length value to be tested
 *
 * Check that the value from the Ethertype/length field is a valid Ethertype.
 *
 * Return true if the valid is an 802.3 supported Ethertype.
 */
static inline bool eth_proto_is_802_3(__be16 proto)
{
#ifndef __BIG_ENDIAN
	/* if CPU is little endian mask off bits representing LSB */
	proto &= htons(0xFF00);
#endif
	/* cast both to u16 and compare since LSB can be ignored */
	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
}
222
223
224
225
226
227
228
229
230static inline void eth_random_addr(u8 *addr)
231{
232 get_random_bytes(addr, ETH_ALEN);
233 addr[0] &= 0xfe;
234 addr[0] |= 0x02;
235}
236
237
238
239
240
241
242
243static inline void eth_broadcast_addr(u8 *addr)
244{
245 memset(addr, 0xff, ETH_ALEN);
246}
247
248
249
250
251
252
253
254static inline void eth_zero_addr(u8 *addr)
255{
256 memset(addr, 0x00, ETH_ALEN);
257}
258
259
260
261
262
263
264
265
266
267
/**
 * eth_hw_addr_random - Generate software assigned random Ethernet and
 * set device flag
 * @dev: pointer to net_device structure
 *
 * Generate a random Ethernet address (MAC) to be used by a net device
 * and set addr_assign_type so the state can be read by sysfs and be
 * used by userspace.
 */
static inline void eth_hw_addr_random(struct net_device *dev)
{
	u8 addr[ETH_ALEN];

	eth_random_addr(addr);
	__dev_addr_set(dev, addr, ETH_ALEN);
	dev->addr_assign_type = NET_ADDR_RANDOM;
}
276
277
278
279
280
281
282
283static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha)
284{
285 return ether_crc(ETH_ALEN, ha->addr);
286}
287
288
289
290
291
292
293
294
/**
 * ether_addr_copy - Copy an Ethernet address
 * @dst: Pointer to a six-byte array Ethernet address destination
 * @src: Pointer to a six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 */
static inline void ether_addr_copy(u8 *dst, const u8 *src)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	*(u32 *)dst = *(const u32 *)src;
	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
#else
	/* three aligned 16-bit stores instead of a memcpy() call */
	u16 *a = (u16 *)dst;
	const u16 *b = (const u16 *)src;

	a[0] = b[0];
	a[1] = b[1];
	a[2] = b[2];
#endif
}
309
310
311
312
313
314
315
316
/**
 * eth_hw_addr_set - Assign Ethernet address to a net_device
 * @dev: pointer to net_device structure
 * @addr: address to assign
 *
 * Assign an Ethernet address to the interface.
 */
static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
	__dev_addr_set(dev, addr, ETH_ALEN);
}
321
322
323
324
325
326
327
328
329
/**
 * eth_hw_addr_inherit - Copy dev_addr from another net_device
 * @dst: pointer to net_device to copy dev_addr to
 * @src: pointer to net_device to copy dev_addr from
 *
 * Copy the Ethernet address from one net_device to another along with
 * the address attributes (addr_assign_type).
 */
static inline void eth_hw_addr_inherit(struct net_device *dst,
				       struct net_device *src)
{
	dst->addr_assign_type = src->addr_assign_type;
	eth_hw_addr_set(dst, src->dev_addr);
}
336
337
338
339
340
341
342
343
344
345
/**
 * ether_addr_equal - Compare two Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal
 *
 * Please note: addr1 & addr2 must both be aligned to u16.
 */
static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* branch-free: OR all XOR differences into one word */
	u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
		   ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));

	return fold == 0;
#else
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
/**
 * ether_addr_equal_64bits - Compare two Ethernet addresses
 * @addr1: Pointer to an array of 8 bytes
 * @addr2: Pointer to an other array of 8 bytes
 *
 * Compare two Ethernet addresses, returns true if equal, false otherwise.
 *
 * The function doesn't need any conditional branches and possibly uses
 * word memory accesses on CPU allowing cheap unaligned memory reads.
 * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
 *
 * Please note that alignment of addr1 & addr2 are only guaranteed to be
 * 16 bits, and the two trailing pad bytes must be readable.
 */
static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);

	/* shift the two don't-care pad bytes out of the comparison */
#ifdef __BIG_ENDIAN
	return (fold >> 16) == 0;
#else
	return (fold << 16) == 0;
#endif
#else
	return ether_addr_equal(addr1, addr2);
#endif
}
389
390
391
392
393
394
395
396
397
398
/**
 * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal
 *
 * Please note: Use only when any Ethernet address may not be u16 aligned.
 */
static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return ether_addr_equal(addr1, addr2);
#else
	return memcmp(addr1, addr2, ETH_ALEN) == 0;
#endif
}
407
408
409
410
411
412
413
414
415
416
417
418static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
419 const u8 *mask)
420{
421 int i;
422
423 for (i = 0; i < ETH_ALEN; i++) {
424 if ((addr1[i] ^ addr2[i]) & mask[i])
425 return false;
426 }
427
428 return true;
429}
430
431
432
433
434
435
436
437static inline u64 ether_addr_to_u64(const u8 *addr)
438{
439 u64 u = 0;
440 int i;
441
442 for (i = 0; i < ETH_ALEN; i++)
443 u = u << 8 | addr[i];
444
445 return u;
446}
447
448
449
450
451
452
453static inline void u64_to_ether_addr(u64 u, u8 *addr)
454{
455 int i;
456
457 for (i = ETH_ALEN - 1; i >= 0; i--) {
458 addr[i] = u & 0xff;
459 u = u >> 8;
460 }
461}
462
463
464
465
466
467
468static inline void eth_addr_dec(u8 *addr)
469{
470 u64 u = ether_addr_to_u64(addr);
471
472 u--;
473 u64_to_ether_addr(u, addr);
474}
475
476
477
478
479
480static inline void eth_addr_inc(u8 *addr)
481{
482 u64 u = ether_addr_to_u64(addr);
483
484 u++;
485 u64_to_ether_addr(u, addr);
486}
487
488
489
490
491
492
493
494
495
496
497
498
/**
 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
 * @dev: Pointer to a device structure
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Compare passed address with all addresses of the device. Return true if the
 * address is one of the device addresses.
 *
 * Note that this function calls ether_addr_equal_64bits() so take care of
 * the right padding.
 */
static inline bool is_etherdev_addr(const struct net_device *dev,
				    const u8 addr[6 + 2])
{
	struct netdev_hw_addr *ha;
	bool res = false;

	/* the device address list is traversed under RCU protection */
	rcu_read_lock();
	for_each_dev_addr(dev, ha) {
		res = ether_addr_equal_64bits(addr, ha->addr);
		if (res)
			break;
	}
	rcu_read_unlock();
	return res;
}
514#endif
515
516
517
518
519
520
521
522
523
524
525
526
527
/**
 * compare_ether_header - Compare two Ethernet headers
 * @a: Pointer to Ethernet header
 * @b: Pointer to Ethernet header
 *
 * Compare two Ethernet headers, returns 0 if equal.
 * This assumes that the network header (i.e., IP header) is 4-byte
 * aligned OR the platform can handle unaligned access.  This is the
 * case for all packets coming into netif_receive_skb or similar
 * entry points.
 */
static inline unsigned long compare_ether_header(const void *a, const void *b)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	unsigned long fold;

	/*
	 * We want to compare 14 bytes:
	 *  [a0 ... a13] ^ [b0 ... b13]
	 * Use two long XOR, ORed together, with an overlap of two bytes.
	 *  [a0  a1  a2  a3  a4  a5  a6  a7 ] ^ [b0  b1  b2  b3  b4  b5  b6  b7 ] |
	 *  [a6  a7  a8  a9  a10 a11 a12 a13] ^ [b6  b7  b8  b9  b10 b11 b12 b13]
	 * This means the [a6 a7] ^ [b6 b7] part is done two times.
	 */
	fold = *(unsigned long *)a ^ *(unsigned long *)b;
	fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
	return fold;
#else
	/* 2 + 4 + 4 + 4 bytes: Ethernet header is 14 bytes, start u32-aligned+2 */
	u32 *a32 = (u32 *)((u8 *)a + 2);
	u32 *b32 = (u32 *)((u8 *)b + 2);

	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
#endif
}
552
553
554
555
556
557
558
559
560
561
562
563static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
564 unsigned int id)
565{
566 u64 u = ether_addr_to_u64(base_addr);
567 u8 addr[ETH_ALEN];
568
569 u += id;
570 u64_to_ether_addr(u, addr);
571 eth_hw_addr_set(dev, addr);
572}
573
574
575
576
577
578
579
580
/**
 * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
 * @skb: Buffer to pad
 *
 * An Ethernet frame should have a minimum size of 60 bytes.  This function
 * takes short frames and pads them with zeros up to the 60 byte limit.
 *
 * Returns 0 on success, a negative errno if padding failed (skb is freed
 * by skb_put_padto() on failure).
 */
static inline int eth_skb_pad(struct sk_buff *skb)
{
	return skb_put_padto(skb, ETH_ZLEN);
}
585
586#endif
587