#include "cxgb4.h"
#include "smt.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"

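/* Allocate and initialize the Source MAC Table (SMT).  Every entry starts
 * out unused, with a zeroed source MAC and a reference count of zero.
 */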
struct smt_data *t4_init_smt(void)
{
	unsigned int smt_size;
	struct smt_data *s;
	int i;

	smt_size = SMT_SIZE;

	s = kvzalloc(sizeof(*s) + smt_size * sizeof(struct smt_entry),
		     GFP_KERNEL);
	if (!s)
		return NULL;
	s->smt_size = smt_size;
	rwlock_init(&s->lock);
	for (i = 0; i < s->smt_size; ++i) {
		s->smtab[i].idx = i;
		s->smtab[i].state = SMT_STATE_UNUSED;
		memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
		spin_lock_init(&s->smtab[i].lock);
		atomic_set(&s->smtab[i].refcnt, 0);
	}
	return s;
}

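/* Scan the SMT for an entry to hold @smac: prefer an in-use SWITCHING entry
 * that already carries the same MAC (so it can be shared), otherwise fall
 * back to the first free slot.  The caller must hold the table write lock.
 */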
static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
{
	struct smt_entry *first_free = NULL;
	struct smt_entry *e, *end;

	for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
		if (atomic_read(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;
		} else {
			if (e->state == SMT_STATE_SWITCHING) {
				/* This entry is actually in use. See if we
				 * can re-use it?
				 */
				if (memcmp(e->src_mac, smac, ETH_ALEN) == 0)
					goto found_reuse;
			}
		}
	}

	if (first_free) {
		e = first_free;
		goto found;
	}
	return NULL;

found:
	e->state = SMT_STATE_UNUSED;

found_reuse:
	return e;
}

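/* Mark an entry free again.  Called once the last reference has been
 * dropped; the reference count is re-checked under the entry lock in case
 * the entry was recycled in the meantime.
 */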
static void t4_smte_free(struct smt_entry *e)
{
	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {
		e->state = SMT_STATE_UNUSED;
	}
	spin_unlock_bh(&e->lock);
}

/**
 * cxgb4_smt_release - Release SMT entry
 * @e: smt entry to release
 *
 * Releases ref count and frees up an smt entry from SMT table
 */
void cxgb4_smt_release(struct smt_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_smte_free(e);
}
EXPORT_SYMBOL(cxgb4_smt_release);

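/* Handle an incoming SMT_WRITE_RPL CPL message.  A non-zero status means
 * hardware rejected the write, so flag the corresponding entry as bad.
 */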
void do_smt_write_rpl(struct adapter *adap, const struct cpl_smt_write_rpl *rpl)
{
	unsigned int smtidx = TID_TID_G(GET_TID(rpl));
	struct smt_data *s = adap->smt;

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		struct smt_entry *e = &s->smtab[smtidx];

		dev_err(adap->pdev_dev,
			"Unexpected SMT_WRITE_RPL status %u for entry %u\n",
			rpl->status, smtidx);
		spin_lock(&e->lock);
		e->state = SMT_STATE_ERROR;
		spin_unlock(&e->lock);
		return;
	}
}

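/* Build a CPL_SMT_WRITE_REQ (or its T6 variant) for the given entry and send
 * it to hardware via the management TX path; the reply is directed to the
 * firmware event queue.
 */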
static int write_smt_entry(struct adapter *adapter, struct smt_entry *e)
{
	struct cpl_t6_smt_write_req *t6req;
	struct smt_data *s = adapter->smt;
	struct cpl_smt_write_req *req;
	struct sk_buff *skb;
	int size;
	u8 row;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
		size = sizeof(*req);
		skb = alloc_skb(size, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Source MAC Table (SMT) contains 256 SMAC entries
		 * organized in 128 rows of 2 entries each.
		 */
		req = (struct cpl_smt_write_req *)__skb_put(skb, size);
		INIT_TP_WR(req, 0);

		/* Each row contains an SMAC pair.  The LSB of the entry
		 * index selects the SMAC within the row.
		 */
		row = (e->idx >> 1);
		if (e->idx & 1) {
			req->pfvf1 = 0x0;
			memcpy(req->src_mac1, e->src_mac, ETH_ALEN);

			/* Fill pfvf0/src_mac0 with the entry at the
			 * previous index in the SMT.
			 */
			req->pfvf0 = 0x0;
			memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
			       ETH_ALEN);
		} else {
			req->pfvf0 = 0x0;
			memcpy(req->src_mac0, e->src_mac, ETH_ALEN);

			/* Fill pfvf1/src_mac1 with the entry at the
			 * next index in the SMT.
			 */
			req->pfvf1 = 0x0;
			memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
			       ETH_ALEN);
		}
	} else {
		size = sizeof(*t6req);
		skb = alloc_skb(size, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		t6req = (struct cpl_t6_smt_write_req *)__skb_put(skb, size);
		INIT_TP_WR(t6req, 0);
		req = (struct cpl_smt_write_req *)t6req;

		/* Fill pfvf0/src_mac0 from the SMT entry itself. */
		req->pfvf0 = 0x0;
		memcpy(req->src_mac0, s->smtab[e->idx].src_mac, ETH_ALEN);
		row = e->idx;
	}

	OPCODE_TID(req) =
		htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, e->idx |
				    TID_QID_V(adapter->sge.fw_evtq.abs_id)));
	req->params = htonl(SMTW_NORPL_V(0) |
			    SMTW_IDX_V(row) |
			    SMTW_OVLAN_IDX_V(0));
	t4_mgmt_tx(adapter, skb);
	return 0;
}

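/* Find (or reuse) an SMT entry for the given source MAC, mark it SWITCHING,
 * program it into hardware on first use, and take a reference.  Reusing an
 * already-programmed entry only bumps its reference count.
 */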
static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
						u8 *smac)
{
	struct smt_data *s = adap->smt;
	struct smt_entry *e;

	write_lock_bh(&s->lock);
	e = find_or_alloc_smte(s, smac);
	if (e) {
		spin_lock(&e->lock);
		if (!atomic_read(&e->refcnt)) {
			atomic_set(&e->refcnt, 1);
			e->state = SMT_STATE_SWITCHING;
			e->pfvf = pfvf;
			memcpy(e->src_mac, smac, ETH_ALEN);
			write_smt_entry(adap, e);
		} else {
			atomic_inc(&e->refcnt);
		}
		spin_unlock(&e->lock);
	}
	write_unlock_bh(&s->lock);
	return e;
}

/**
 * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
 * @dev: net_device pointer
 * @smac: MAC address to add to SMT
 *
 * Allocates an SMT entry to be used by the switching rule of a filter.
 * Returns a pointer to the SMT entry, or NULL if no entry is available.
 */
struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_smt_alloc_switching(adap, 0x0, smac);
}
EXPORT_SYMBOL(cxgb4_smt_alloc_switching);