1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#ifndef _LINUX_ETHERDEVICE_H
23#define _LINUX_ETHERDEVICE_H
24
25#include <linux/if_ether.h>
26#include <linux/netdevice.h>
27#include <linux/random.h>
28#include <asm/unaligned.h>
29#include <asm/bitsperlong.h>
30
31#ifdef __KERNEL__
32struct device;
33int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
34unsigned char *arch_get_platform_get_mac_address(void);
35u32 eth_get_headlen(void *data, unsigned int max_len);
36__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
37extern const struct header_ops eth_header_ops;
38
39int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
40 const void *daddr, const void *saddr, unsigned len);
41int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
42int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
43 __be16 type);
44void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
45 const unsigned char *haddr);
46int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
47void eth_commit_mac_addr_change(struct net_device *dev, void *p);
48int eth_mac_addr(struct net_device *dev, void *p);
49int eth_change_mtu(struct net_device *dev, int new_mtu);
50int eth_validate_addr(struct net_device *dev);
51
52struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
53 unsigned int rxqs);
54#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
55#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
56
57struct sk_buff **eth_gro_receive(struct sk_buff **head,
58 struct sk_buff *skb);
59int eth_gro_complete(struct sk_buff *skb, int nhoff);
60
61
/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
64
65
66
67
68
69
70
71
72
73
/**
 * is_link_local_ether_addr - Determine if given Ethernet address is link-local
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
 * IEEE 802.1Q 8.6.3 Frame filtering.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_link_local_ether_addr(const u8 *addr)
{
	__be16 *a = (__be16 *)addr;
	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
	/* mask ignores the low nibble of the last octet (the X above) */
	static const __be16 m = cpu_to_be16(0xfff0);

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* one 32-bit word read plus a masked 16-bit compare;
	 * (__force int) silences sparse's __be16 endianness check */
	return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
		(__force int)((a[2] ^ b[2]) & m)) == 0;
#else
	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
}
87
88
89
90
91
92
93
94
95
/**
 * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is all zeroes.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_zero_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
#else
	return (*(const u16 *)(addr + 0) |
		*(const u16 *)(addr + 2) |
		*(const u16 *)(addr + 4)) == 0;
#endif
}
106
107
108
109
110
111
112
113
/**
 * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is a multicast address.
 * By definition the broadcast address is also a multicast address.
 */
static inline bool is_multicast_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	u32 a = *(const u32 *)addr;
#else
	u16 a = *(const u16 *)addr;
#endif
#ifdef __BIG_ENDIAN
	/* multicast bit is the LSB of the first octet = top byte of 'a' */
	return 0x01 & (a >> ((sizeof(a) * 8) - 8));
#else
	return 0x01 & a;
#endif
}
127
/*
 * Slightly optimized variant of is_multicast_ether_addr().
 * Reads the address as a single 64-bit word, so it may touch the two
 * padding bytes past the 6-byte address — hence the u8 addr[6+2]
 * prototype; callers must guarantee those bytes are readable.
 */
static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
	return 0x01 & ((*(const u64 *)addr) >> 56);
#else
	return 0x01 & (*(const u64 *)addr);
#endif
#else
	return is_multicast_ether_addr(addr);
#endif
}
140
141
142
143
144
145
146
147static inline bool is_local_ether_addr(const u8 *addr)
148{
149 return 0x02 & addr[0];
150}
151
152
153
154
155
156
157
158
159
160static inline bool is_broadcast_ether_addr(const u8 *addr)
161{
162 return (*(const u16 *)(addr + 0) &
163 *(const u16 *)(addr + 2) &
164 *(const u16 *)(addr + 4)) == 0xffff;
165}
166
167
168
169
170
171
172
173static inline bool is_unicast_ether_addr(const u8 *addr)
174{
175 return !is_multicast_ether_addr(addr);
176}
177
178
179
180
181
182
183
184
185
186
187
188
189static inline bool is_valid_ether_addr(const u8 *addr)
190{
191
192
193 return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
194}
195
196
197
198
199
200
201
202
203
/**
 * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
 * @proto: Ethertype/length value to be tested
 *
 * Check that the value from the Ethertype/length field is a valid Ethertype.
 *
 * Return true if the valid is an 802.3 supported Ethertype.
 */
static inline bool eth_proto_is_802_3(__be16 proto)
{
#ifndef __BIG_ENDIAN
	/* if CPU is little endian mask off bits representing LSB */
	proto &= htons(0xFF00);
#endif
	/* cast both to u16 and compare since LSB can be ignored */
	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
}
213
214
215
216
217
218
219
220
221static inline void eth_random_addr(u8 *addr)
222{
223 get_random_bytes(addr, ETH_ALEN);
224 addr[0] &= 0xfe;
225 addr[0] |= 0x02;
226}
227
228#define random_ether_addr(addr) eth_random_addr(addr)
229
230
231
232
233
234
235
236static inline void eth_broadcast_addr(u8 *addr)
237{
238 memset(addr, 0xff, ETH_ALEN);
239}
240
241
242
243
244
245
246
247static inline void eth_zero_addr(u8 *addr)
248{
249 memset(addr, 0x00, ETH_ALEN);
250}
251
252
253
254
255
256
257
258
259
260
261static inline void eth_hw_addr_random(struct net_device *dev)
262{
263 dev->addr_assign_type = NET_ADDR_RANDOM;
264 eth_random_addr(dev->dev_addr);
265}
266
267
268
269
270
271
272
273
/**
 * ether_addr_copy - Copy an Ethernet address
 * @dst: Pointer to a six-byte array Ethernet address destination
 * @src: Pointer to a six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 */
static inline void ether_addr_copy(u8 *dst, const u8 *src)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* copy as one 32-bit plus one 16-bit store */
	*(u32 *)dst = *(const u32 *)src;
	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
#else
	u16 *a = (u16 *)dst;
	const u16 *b = (const u16 *)src;

	a[0] = b[0];
	a[1] = b[1];
	a[2] = b[2];
#endif
}
288
289
290
291
292
293
294
295
296
297static inline void eth_hw_addr_inherit(struct net_device *dst,
298 struct net_device *src)
299{
300 dst->addr_assign_type = src->addr_assign_type;
301 ether_addr_copy(dst->dev_addr, src->dev_addr);
302}
303
304
305
306
307
308
309
310
311
312
/**
 * ether_addr_equal - Compare two Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal
 *
 * Please note: addr1 & addr2 must both be aligned to u16.
 */
static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* XOR the two addresses; any set bit in 'fold' means a mismatch */
	u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
		   ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));

	return fold == 0;
#else
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
/**
 * ether_addr_equal_64bits - Compare two Ethernet addresses
 * @addr1: Pointer to an array of 8 bytes
 * @addr2: Pointer to an other array of 8 bytes
 *
 * Compare two Ethernet addresses, returns true if equal, false otherwise.
 *
 * The function doesn't need any conditional branches and possibly uses
 * word memory accesses on CPU allowing cheap unaligned memory reads.
 * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
 *
 * Please note that alignment of addr1 & addr2 are only guaranteed to be
 * 16 bits.
 */
static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
					   const u8 addr2[6+2])
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);

	/* discard the 16 bits that came from the two padding bytes */
#ifdef __BIG_ENDIAN
	return (fold >> 16) == 0;
#else
	return (fold << 16) == 0;
#endif
#else
	return ether_addr_equal(addr1, addr2);
#endif
}
357
358
359
360
361
362
363
364
365
366
/**
 * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal
 *
 * Please note: Use only when any Ethernet address may not be u16 aligned.
 */
static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return ether_addr_equal(addr1, addr2);
#else
	return memcmp(addr1, addr2, ETH_ALEN) == 0;
#endif
}
375
376
377
378
379
380
381
382
383
384
385
386static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
387 const u8 *mask)
388{
389 int i;
390
391 for (i = 0; i < ETH_ALEN; i++) {
392 if ((addr1[i] ^ addr2[i]) & mask[i])
393 return false;
394 }
395
396 return true;
397}
398
399
400
401
402
403
404
405
406
407
408
409
/**
 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
 * @dev: Pointer to a device structure
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Compare passed address with all addresses of the device. Return true if the
 * address if one of the device addresses.
 *
 * Note that this function calls ether_addr_equal_64bits() so take care of
 * the right padding.
 */
static inline bool is_etherdev_addr(const struct net_device *dev,
				    const u8 addr[6 + 2])
{
	struct netdev_hw_addr *ha;
	bool res = false;

	rcu_read_lock();
	for_each_dev_addr(dev, ha) {
		res = ether_addr_equal_64bits(addr, ha->addr);
		if (res)
			break;
	}
	rcu_read_unlock();
	return res;
}
425#endif
426
427
428
429
430
431
432
433
434
435
436
437
438
/**
 * compare_ether_header - Compare two Ethernet headers
 * @a: Pointer to Ethernet header
 * @b: Pointer to Ethernet header
 *
 * Compare two Ethernet headers, returns 0 if equal.
 * This assumes that the network header (i.e., IP header) is 4-byte
 * aligned OR the platform can handle unaligned access.  This is the
 * case for all packets coming into netif_receive_skb or similar entry
 * points.
 */
static inline unsigned long compare_ether_header(const void *a, const void *b)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	unsigned long fold;

	/*
	 * We want to compare 14 bytes:
	 *  [a0 ... a13] ^ [b0 ... b13]
	 * Use two long XOR, ORed together, with an overlap of two bytes.
	 *  [a0  ... a7 ] ^ [b0  ... b7 ] |
	 *  [a6  ... a13] ^ [b6  ... b13]
	 * Note: doesn't work on bitfields
	 */
	fold = *(unsigned long *)a ^ *(unsigned long *)b;
	fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
	return fold;
#else
	u32 *a32 = (u32 *)((u8 *)a + 2);
	u32 *b32 = (u32 *)((u8 *)b + 2);

	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
#endif
}
463
464
465
466
467
468
469
470
/**
 * eth_skb_pad - Pad buffer to mandatory minimum length
 * @skb: Buffer to pad
 *
 * Ethernet standard mandates minimum frame length of 60 bytes; pad the
 * buffer with zeros up to that limit via skb_put_padto().
 * Returns 0 on success, negative error code otherwise
 * (NOTE(review): skb_put_padto() presumably frees the skb on failure —
 * confirm against its definition before relying on the skb afterwards).
 */
static inline int eth_skb_pad(struct sk_buff *skb)
{
	return skb_put_padto(skb, ETH_ZLEN);
}
475
476#endif
477