1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#ifndef _LINUX_ETHERDEVICE_H
23#define _LINUX_ETHERDEVICE_H
24
25#include <linux/if_ether.h>
26#include <linux/netdevice.h>
27#include <linux/random.h>
28#include <asm/unaligned.h>
29#include <asm/bitsperlong.h>
30
#ifdef __KERNEL__
/* Determine the packet's protocol ID on the receive path (net/ethernet/eth.c). */
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;

/* Generic Ethernet header_ops implementations. */
int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
	       const void *daddr, const void *saddr, unsigned len);
int eth_rebuild_header(struct sk_buff *skb);
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
		     __be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
			     const unsigned char *haddr);
/* Generic net_device_ops helpers shared by Ethernet drivers. */
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
int eth_change_mtu(struct net_device *dev, int new_mtu);
int eth_validate_addr(struct net_device *dev);

struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
				      unsigned int rxqs);
/* Single-queue convenience wrappers around alloc_etherdev_mqs(). */
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)


/*
 * Base of the reserved link-local multicast range 01:80:c2:00:00:0X.
 * Declared 2-byte aligned so it may be read as __be16 words below.
 */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
57
58
59
60
61
62
63
64
65
66
/**
 * is_link_local_ether_addr - Determine if given Ethernet address is link-local
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is a link-local reserved address
 * (01:80:c2:00:00:0X).
 *
 * NOTE(review): addr is read as __be16/u32 words, so it must be at least
 * 16-bit aligned (u32-read branch relies on the platform tolerating
 * unaligned access via CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS).
 */
static inline bool is_link_local_ether_addr(const u8 *addr)
{
	__be16 *a = (__be16 *)addr;
	/* eth_reserved_addr_base is __aligned(2), so the word casts are safe */
	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
	/* mask off the low nibble of the last octet: 00..0f all match */
	static const __be16 m = cpu_to_be16(0xfff0);

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* compare the first four bytes in one 32-bit load, then the masked tail */
	return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
		((a[2] ^ b[2]) & m)) == 0;
#else
	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
}
80
81
82
83
84
85
86
87
88
/**
 * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is all zeroes.
 *
 * NOTE(review): addr is read as 16/32-bit words, so it must be at least
 * 16-bit aligned.
 */
static inline bool is_zero_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
#else
	const u16 *w = (const u16 *)addr;

	/* OR the three halfwords together; any set bit means non-zero */
	return (w[0] | w[1] | w[2]) == 0;
#endif
}
99
100
101
102
103
104
105
106
/**
 * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is a multicast address.
 * By definition the broadcast address is also a multicast address.
 */
static inline bool is_multicast_ether_addr(const u8 *addr)
{
	/* the I/G bit is the least significant bit of the first octet */
	return addr[0] & 0x01;
}
111
112
113
114
115
116
117
/**
 * is_local_ether_addr - Determine if the Ethernet address is locally-assigned
 * one (IEEE 802).
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is a locally administered address.
 */
static inline bool is_local_ether_addr(const u8 *addr)
{
	/* the U/L bit is the second-least significant bit of the first octet */
	return addr[0] & 0x02;
}
122
123
124
125
126
127
128
129
130
/**
 * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is the broadcast address (ff:ff:ff:ff:ff:ff).
 *
 * NOTE(review): addr is read as three u16 words, so it must be at least
 * 16-bit aligned.
 */
static inline bool is_broadcast_ether_addr(const u8 *addr)
{
	const u16 *w = (const u16 *)addr;

	/* AND the halfwords: only all-ones input leaves every bit set */
	return (w[0] & w[1] & w[2]) == 0xffff;
}
137
138
139
140
141
142
143
/**
 * is_unicast_ether_addr - Determine if the Ethernet address is unicast
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is a unicast address, i.e. the I/G bit
 * (least significant bit of the first octet) is clear.
 */
static inline bool is_unicast_ether_addr(const u8 *addr)
{
	return !(addr[0] & 0x01);
}
148
149
150
151
152
153
154
155
156
157
158
159
/**
 * is_valid_ether_addr - Determine if the given Ethernet address is valid
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
 * a multicast address, and is not ff:ff:ff:ff:ff:ff.
 *
 * Return true if the address is valid.
 */
static inline bool is_valid_ether_addr(const u8 *addr)
{
	/* ff:ff:ff:ff:ff:ff is a multicast address, so the multicast
	 * check also rejects the broadcast address.
	 */
	return !(is_multicast_ether_addr(addr) || is_zero_ether_addr(addr));
}
166
167
168
169
170
171
172
173
174static inline void eth_random_addr(u8 *addr)
175{
176 get_random_bytes(addr, ETH_ALEN);
177 addr[0] &= 0xfe;
178 addr[0] |= 0x02;
179}
180
/* Historical alias kept for existing callers. */
#define random_ether_addr(addr) eth_random_addr(addr)
182
183
184
185
186
187
188
/**
 * eth_broadcast_addr - Assign broadcast address
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Assign the broadcast address (ff:ff:ff:ff:ff:ff) to the given address array.
 */
static inline void eth_broadcast_addr(u8 *addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = 0xff;
}
193
194
195
196
197
198
199
/**
 * eth_zero_addr - Assign zero address
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Assign the zero address (00:00:00:00:00:00) to the given address array.
 */
static inline void eth_zero_addr(u8 *addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = 0x00;
}
204
205
206
207
208
209
210
211
212
213
214static inline void eth_hw_addr_random(struct net_device *dev)
215{
216 dev->addr_assign_type = NET_ADDR_RANDOM;
217 eth_random_addr(dev->dev_addr);
218}
219
220
221
222
223
224
225
226
/**
 * ether_addr_copy - Copy an Ethernet address
 * @dst: Pointer to a six-byte array Ethernet address destination
 * @src: Pointer to a six-byte array Ethernet address source
 *
 * NOTE(review): both dst and src are accessed as 16/32-bit words, so they
 * must be at least 16-bit aligned; use memcpy for unaligned buffers.
 */
static inline void ether_addr_copy(u8 *dst, const u8 *src)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	*(u32 *)dst = *(const u32 *)src;
	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
#else
	u16 *d16 = (u16 *)dst;
	const u16 *s16 = (const u16 *)src;

	/* copy the six bytes as three halfwords */
	d16[0] = s16[0];
	d16[1] = s16[1];
	d16[2] = s16[2];
#endif
}
241
242
243
244
245
246
247
248
249
250static inline void eth_hw_addr_inherit(struct net_device *dst,
251 struct net_device *src)
252{
253 dst->addr_assign_type = src->addr_assign_type;
254 ether_addr_copy(dst->dev_addr, src->dev_addr);
255}
256
257
258
259
260
261
262
263
264
265
/**
 * ether_addr_equal - Compare two Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal.
 *
 * NOTE(review): both addresses are read as 16/32-bit words, so they must
 * be at least 16-bit aligned; use ether_addr_equal_unaligned otherwise.
 */
static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* XOR-fold all six bytes; any difference leaves a bit set */
	u32 diff = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
		   ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));

	return diff == 0;
#else
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	return !((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]));
#endif
}
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
/**
 * ether_addr_equal_64bits - Compare two Ethernet addresses
 * @addr1: Pointer to an array of 8 bytes
 * @addr2: Pointer to an other array of 8 bytes
 *
 * Compare two Ethernet addresses, returns true if equal, false otherwise.
 *
 * The function doesn't need any conditional branches and possibly uses
 * word memory accesses on CPUs allowing cheap unaligned memory reads.
 * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
 *
 * NOTE(review): on the 64-bit fast path this reads EIGHT bytes from each
 * argument, so both must point into buffers with two accessible padding
 * bytes after the address (hence the [6+2] declarations).
 */
static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
					   const u8 addr2[6+2])
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);

	/* discard the XOR of the two padding bytes; only bytes 0-5 matter */
#ifdef __BIG_ENDIAN
	return (fold >> 16) == 0;
#else
	return (fold << 16) == 0;
#endif
#else
	return ether_addr_equal(addr1, addr2);
#endif
}
310
311
312
313
314
315
316
317
318
319
/**
 * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal.
 * Unlike ether_addr_equal(), this makes no assumption about alignment.
 */
static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return ether_addr_equal(addr1, addr2);
#else
	return !memcmp(addr1, addr2, ETH_ALEN);
#endif
}
328
329
330
331
332
333
334
335
336
337
338
339
/**
 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
 * @dev: Pointer to a device structure
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Compare the passed address with all addresses of the device; returns
 * true if the address is one of the device addresses.
 *
 * Note that this function calls ether_addr_equal_64bits(), so the caller
 * must provide the two bytes of padding after @addr (hence [6 + 2]).
 */
static inline bool is_etherdev_addr(const struct net_device *dev,
				    const u8 addr[6 + 2])
{
	struct netdev_hw_addr *ha;
	bool res = false;

	/* the device address list is RCU-protected */
	rcu_read_lock();
	for_each_dev_addr(dev, ha) {
		res = ether_addr_equal_64bits(addr, ha->addr);
		if (res)
			break;
	}
	rcu_read_unlock();
	return res;
}
#endif	/* __KERNEL__ */
356
357
358
359
360
361
362
363
364
365
366
367
368
/**
 * compare_ether_header - Compare two Ethernet headers
 * @a: Pointer to Ethernet header
 * @b: Pointer to Ethernet header
 *
 * Compare two Ethernet headers, returns 0 if equal, non-zero otherwise.
 *
 * NOTE(review): this assumes the headers are at least 2-byte aligned
 * (the portable branch reads a u16 then u32 words at offset 2), or that
 * the platform handles unaligned access efficiently.
 */
static inline unsigned long compare_ether_header(const void *a, const void *b)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	unsigned long fold;

	/*
	 * We want to compare the 14 header bytes [0..13].  Do it with two
	 * 8-byte XORs that overlap by two bytes:
	 *   bytes [0..7]  ^  bytes [0..7]
	 *   bytes [6..13] ^  bytes [6..13]
	 * Bytes 6 and 7 are compared twice, which is harmless since the
	 * result is only tested for zero/non-zero.
	 * (Arithmetic on void * is a GCC extension: it behaves as char *.)
	 */
	fold = *(unsigned long *)a ^ *(unsigned long *)b;
	fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
	return fold;
#else
	u32 *a32 = (u32 *)((u8 *)a + 2);
	u32 *b32 = (u32 *)((u8 *)b + 2);

	/* one 16-bit head plus three 32-bit words covers all 14 bytes */
	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
#endif
}
393
394#endif
395