#ifndef _CHELSIO_L2T_H
#define _CHELSIO_L2T_H

#include <linux/spinlock.h>
#include "t3cdev.h"
#include <linux/atomic.h>

enum {
	L2T_STATE_VALID,	/* entry is up to date */
	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,	/* entry needs address resolution */
	L2T_STATE_UNUSED	/* entry not in use */
};

struct neighbour;
struct sk_buff;

/*
 * Each L2T entry plays multiple roles.  First of all, it keeps state for the
 * corresponding entry of the HW L2 table and maintains a queue of offload
 * packets awaiting address resolution.  Second, it is a node of a hash table
 * chain, where the nodes of the chain are linked together through their next
 * pointer.  Finally, each node is a bucket of a hash table, pointing to the
 * first element in its chain through its first pointer.
 */
struct l2t_entry {
	u16 state;			/* entry state */
	u16 idx;			/* entry index within in-memory table */
	u32 addr;			/* next hop IP address */
	int ifindex;			/* neighbour's net_device's ifindex */
	u16 smt_idx;			/* SMT index */
	u16 vlan;			/* VLAN TCI (id: bits 0-11, prio: 13-15) */
	struct neighbour *neigh;	/* associated neighbour */
	struct l2t_entry *first;	/* start of hash chain */
	struct l2t_entry *next;		/* next l2t_entry on chain */
	struct sk_buff_head arpq;	/* queue of packets awaiting resolution */
	spinlock_t lock;
	atomic_t refcnt;		/* entry reference count */
	u8 dmac[6];			/* neighbour's MAC address */
};

struct l2t_data {
	unsigned int nentries;		/* number of entries */
	struct l2t_entry *rover;	/* starting point for next allocation */
	atomic_t nfree;			/* number of free entries */
	rwlock_t lock;
	struct l2t_entry l2tab[0];
	struct rcu_head rcu_head;	/* to handle rcu cleanup */
};

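/*
 * Note (assumption, not taken verbatim from the driver source): l2tab[0] is
 * a variable-size array, so t3_init_l2t() presumably allocates space for
 * l2t_capacity entries immediately after this structure.
 */
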
typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
					 struct sk_buff *skb);

/*
 * Callback stored in an skb to handle address resolution failure.
 */
struct l2t_skb_cb {
	arp_failure_handler_func arp_failure_handler;
};

#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)

static inline void set_arp_failure_handler(struct sk_buff *skb,
					   arp_failure_handler_func hnd)
{
	L2T_SKB_CB(skb)->arp_failure_handler = hnd;
}

/*
 * Getting to the L2 data from an offload device.
 */
#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))

#define W_TCB_L2T_IX 0
#define S_TCB_L2T_IX 7
#define M_TCB_L2T_IX 0x7ffULL
#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)

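/*
 * Example (illustrative only, derived from the definitions above): the L2T
 * index is an 11-bit field (M_TCB_L2T_IX = 0x7ff) at bit offset
 * S_TCB_L2T_IX = 7 of TCB word W_TCB_L2T_IX = 0, so programming index 5
 * yields V_TCB_L2T_IX(5) == 5 << 7 == 0x280.
 */
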
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
			     struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);

int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);

static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
			   struct l2t_entry *e)
{
	if (likely(e->state == L2T_STATE_VALID))
		return cxgb3_ofld_send(dev, skb);
	return t3_l2t_send_slow(dev, skb, e);
}

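/*
 * Usage sketch (illustrative only; error handling is omitted and the
 * variable names as well as the helper my_arp_failure() are hypothetical).
 * A typical offload transmit path looks roughly like:
 *
 *	struct l2t_entry *e = t3_l2t_get(tdev, dst, dev, daddr);
 *
 *	if (e) {
 *		set_arp_failure_handler(skb, my_arp_failure);
 *		l2t_send(tdev, skb, e);
 *		...
 *		l2t_release(tdev, e);	// drop the reference when done
 *	}
 */
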
static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
{
	struct l2t_data *d;

	rcu_read_lock();
	d = L2DATA(t);

	/*
	 * d can be NULL if the L2 table has already gone away; the entry is
	 * returned to the table only when the table still exists and this
	 * was the last reference.
	 */
	if (atomic_dec_and_test(&e->refcnt) && d)
		t3_l2e_free(d, e);

	rcu_read_unlock();
}

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	/* The 0 -> 1 refcount transition consumes a free entry. */
	if (d && atomic_add_return(1, &e->refcnt) == 1)
		atomic_dec(&d->nfree);
}

#endif