1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include <net/tc_act/tc_gact.h>
36#include <net/tc_act/tc_mirred.h>
37
38#include "cxgb4.h"
39#include "cxgb4_tc_u32_parse.h"
40#include "cxgb4_tc_u32.h"
41
42
43static int fill_match_fields(struct adapter *adap,
44 struct ch_filter_specification *fs,
45 struct tc_cls_u32_offload *cls,
46 const struct cxgb4_match_field *entry,
47 bool next_header)
48{
49 unsigned int i, j;
50 u32 val, mask;
51 int off, err;
52 bool found;
53
54 for (i = 0; i < cls->knode.sel->nkeys; i++) {
55 off = cls->knode.sel->keys[i].off;
56 val = cls->knode.sel->keys[i].val;
57 mask = cls->knode.sel->keys[i].mask;
58
59 if (next_header) {
60
61 if (!cls->knode.sel->keys[i].offmask)
62 continue;
63 } else {
64
65 if (cls->knode.sel->keys[i].offmask)
66 continue;
67 }
68
69 found = false;
70
71 for (j = 0; entry[j].val; j++) {
72 if (off == entry[j].off) {
73 found = true;
74 err = entry[j].val(fs, val, mask);
75 if (err)
76 return err;
77 break;
78 }
79 }
80
81 if (!found)
82 return -EINVAL;
83 }
84
85 return 0;
86}
87
88
89static int fill_action_fields(struct adapter *adap,
90 struct ch_filter_specification *fs,
91 struct tc_cls_u32_offload *cls)
92{
93 unsigned int num_actions = 0;
94 const struct tc_action *a;
95 struct tcf_exts *exts;
96 LIST_HEAD(actions);
97
98 exts = cls->knode.exts;
99 if (tc_no_actions(exts))
100 return -EINVAL;
101
102 tcf_exts_to_list(exts, &actions);
103 list_for_each_entry(a, &actions, list) {
104
105 if (num_actions)
106 return -EINVAL;
107
108
109 if (is_tcf_gact_shot(a)) {
110 fs->action = FILTER_DROP;
111 num_actions++;
112 continue;
113 }
114
115
116 if (is_tcf_mirred_redirect(a)) {
117 struct net_device *n_dev;
118 unsigned int i, index;
119 bool found = false;
120
121 index = tcf_mirred_ifindex(a);
122 for_each_port(adap, i) {
123 n_dev = adap->port[i];
124 if (index == n_dev->ifindex) {
125 fs->action = FILTER_SWITCH;
126 fs->eport = i;
127 found = true;
128 break;
129 }
130 }
131
132
133
134
135 if (!found)
136 return -EINVAL;
137
138 num_actions++;
139 continue;
140 }
141
142
143 return -EINVAL;
144 }
145
146 return 0;
147}
148
/* cxgb4_config_knode - offload a u32 knode as a hardware filter
 * @dev: the net device the filter is attached to
 * @protocol: ethertype from the tc rule (only IPv4/IPv6 are supported)
 * @cls: the u32 offload request
 *
 * Two cases are handled:
 *  1) The knode links to another hash table (link_uhtid != 0): validate
 *     the jump against the per-protocol jump tables and remember the
 *     partial match spec in t->table[] for later rules.
 *  2) A terminal knode: build the full filter spec (merging a previously
 *     stored link spec when the rule hangs off a linked bucket) and
 *     program it into the hardware via cxgb4_set_filter().
 *
 * Returns 0 on success or a negative errno.
 */
int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
		       struct tc_cls_u32_offload *cls)
{
	const struct cxgb4_match_field *start, *link_start = NULL;
	struct adapter *adapter = netdev2adap(dev);
	struct ch_filter_specification fs;
	struct cxgb4_tc_u32_table *t;
	struct cxgb4_link *link;
	unsigned int filter_id;
	u32 uhtid, link_uhtid;
	bool is_ipv6 = false;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
		return -EOPNOTSUPP;

	/* The lower 20 bits of the handle select the hardware filter slot. */
	filter_id = cls->knode.handle & 0xFFFFF;

	/* NOTE(review): nftids is a count; if valid ftids are 0..nftids-1
	 * this '>' admits filter_id == nftids — confirm cxgb4_set_filter()
	 * rejects that value.
	 */
	if (filter_id > adapter->tids.nftids) {
		dev_err(adapter->pdev_dev,
			"Location %d out of range for insertion. Max: %d\n",
			filter_id, adapter->tids.nftids);
		return -ERANGE;
	}

	t = adapter->tc_u32;
	uhtid = TC_U32_USERHTID(cls->knode.handle);
	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);

	/* Ensure that uhtid is either root u32 (i.e. 0x800)
	 * or a valid linked bucket.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* Ensure the link handle uhtid is sane, if specified. */
	if (link_uhtid >= t->size)
		return -EINVAL;

	memset(&fs, 0, sizeof(fs));

	/* Select the per-protocol match field table. */
	if (protocol == htons(ETH_P_IPV6)) {
		start = cxgb4_ipv6_fields;
		is_ipv6 = true;
	} else {
		start = cxgb4_ipv4_fields;
		is_ipv6 = false;
	}

	if (uhtid != 0x800) {
		/* Link must exist from the root node before insertion. */
		if (!t->table[uhtid - 1].link_handle)
			return -EINVAL;

		/* Link must have a valid supported next header. */
		link_start = t->table[uhtid - 1].match_field;
		if (!link_start)
			return -EINVAL;
	}

	/* Parse links and record them for subsequent jumps to valid
	 * next headers.
	 */
	if (link_uhtid) {
		const struct cxgb4_next_header *next;
		bool found = false;
		unsigned int i, j;
		u32 val, mask;
		int off;

		/* Each linked bucket may be set up only once. */
		if (t->table[link_uhtid - 1].link_handle) {
			dev_err(adapter->pdev_dev,
				"Link handle exists for: 0x%x\n",
				link_uhtid);
			return -EINVAL;
		}

		next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;

		/* Try to find a jump-table entry whose offset/shift/mask
		 * selector matches this rule's next-header computation.
		 */
		for (i = 0; next[i].jump; i++) {
			if (next[i].offoff != cls->knode.sel->offoff ||
			    next[i].shift != cls->knode.sel->offshift ||
			    next[i].mask != cls->knode.sel->offmask ||
			    next[i].offset != cls->knode.sel->off)
				continue;

			/* Found a possible candidate.  Now a key must also
			 * match the candidate's off/val/mask triple for the
			 * jump to be valid.
			 */
			for (j = 0; j < cls->knode.sel->nkeys; j++) {
				off = cls->knode.sel->keys[j].off;
				val = cls->knode.sel->keys[j].val;
				mask = cls->knode.sel->keys[j].mask;

				if (next[i].match_off == off &&
				    next[i].match_val == val &&
				    next[i].match_mask == mask) {
					found = true;
					break;
				}
			}

			if (!found)
				continue; /* try the next candidate */

			/* Candidate to jump to next header found.
			 * Translate all the u32 match fields of this rule
			 * into the spec and remember it at the link so the
			 * linked rules inherit it later.
			 */
			ret = fill_match_fields(adapter, &fs, cls,
						start, false);
			if (ret)
				goto out;

			link = &t->table[link_uhtid - 1];
			link->match_field = next[i].jump;
			link->link_handle = cls->knode.handle;
			memcpy(&link->fs, &fs, sizeof(fs));
			break;
		}

		/* No candidate found to jump to next header. */
		if (!found)
			return -EINVAL;

		/* Link recorded; nothing is programmed into hardware yet. */
		return 0;
	}

	/* Fill ch_filter_specification match fields to be shipped to
	 * hardware.  Copy the linked spec (if any) first, then overlay
	 * the values specified by this u32 rule.
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
		/* Copy the rule specified at the link. */
		memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
		ret = fill_match_fields(adapter, &fs, cls,
					link_start, true);
		if (ret)
			goto out;
	}

	ret = fill_match_fields(adapter, &fs, cls, start, false);
	if (ret)
		goto out;

	/* Fill ch_filter_specification action fields to be shipped to
	 * hardware.
	 */
	ret = fill_action_fields(adapter, &fs, cls);
	if (ret)
		goto out;

	/* The filter spec has been completely built from the u32 rule.
	 * Set some sane defaults before programming it.
	 */

	/* Match only packets coming from the ingress port where this
	 * filter is created.
	 */
	fs.val.iport = netdev2pinfo(dev)->port_id;
	fs.mask.iport = ~0;

	/* Enable filter hit counts. */
	fs.hitcnts = 1;

	/* Set type of filter - IPv6 or IPv4. */
	fs.type = is_ipv6 ? 1 : 0;

	/* Program the filter into hardware. */
	ret = cxgb4_set_filter(dev, filter_id, &fs);
	if (ret)
		goto out;

	/* If this rule hangs off a linked bucket, mark its slot in the
	 * link's bitmap so deleting the link can delete this filter too.
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
		set_bit(filter_id, t->table[uhtid - 1].tid_map);

out:
	return ret;
}
340
/* cxgb4_delete_knode - remove an offloaded u32 knode filter
 * @dev: the net device the filter is attached to
 * @protocol: ethertype from the tc rule (unused here)
 * @cls: the u32 offload request identifying the filter
 *
 * Deletes the hardware filter at the slot encoded in the handle.  If the
 * deleted knode was itself a link, every filter inserted through that
 * link (tracked in the link's tid bitmap) is deleted as well and the
 * link entry is reset.
 *
 * Returns 0 on success or a negative errno.
 */
int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
		       struct tc_cls_u32_offload *cls)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int filter_id, max_tids, i, j;
	struct cxgb4_link *link = NULL;
	struct cxgb4_tc_u32_table *t;
	u32 handle, uhtid;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	/* The lower 20 bits of the handle select the hardware filter slot. */
	filter_id = cls->knode.handle & 0xFFFFF;

	/* NOTE(review): same '>' vs '>=' question as in cxgb4_config_knode —
	 * filter_id == nftids passes this check; confirm the lower layer
	 * rejects it.
	 */
	if (filter_id > adapter->tids.nftids) {
		dev_err(adapter->pdev_dev,
			"Location %d out of range for deletion. Max: %d\n",
			filter_id, adapter->tids.nftids);
		return -ERANGE;
	}

	t = adapter->tc_u32;
	handle = cls->knode.handle;
	uhtid = TC_U32_USERHTID(cls->knode.handle);

	/* Ensure that uhtid is either root u32 (i.e. 0x800)
	 * or a valid linked bucket.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* If the rule lives in a linked bucket, it must have been
	 * inserted through that link.
	 */
	if (uhtid != 0x800) {
		link = &t->table[uhtid - 1];
		if (!link->link_handle)
			return -EINVAL;

		if (!test_bit(filter_id, link->tid_map))
			return -EINVAL;
	}

	ret = cxgb4_del_filter(dev, filter_id);
	if (ret)
		goto out;

	if (link)
		clear_bit(filter_id, link->tid_map);

	/* If a link is being deleted, then delete all filters
	 * associated with the link.
	 */
	max_tids = adapter->tids.nftids;
	for (i = 0; i < t->size; i++) {
		link = &t->table[i];

		if (link->link_handle == handle) {
			for (j = 0; j < max_tids; j++) {
				if (!test_bit(j, link->tid_map))
					continue;

				ret = __cxgb4_del_filter(dev, j, NULL);
				if (ret)
					goto out;

				clear_bit(j, link->tid_map);
			}

			/* Clear the link state so it can be reused. */
			link->match_field = NULL;
			link->link_handle = 0;
			memset(&link->fs, 0, sizeof(link->fs));
			break;
		}
	}

out:
	return ret;
}
421
422void cxgb4_cleanup_tc_u32(struct adapter *adap)
423{
424 struct cxgb4_tc_u32_table *t;
425 unsigned int i;
426
427 if (!adap->tc_u32)
428 return;
429
430
431 t = adap->tc_u32;
432 for (i = 0; i < t->size; i++) {
433 struct cxgb4_link *link = &t->table[i];
434
435 t4_free_mem(link->tid_map);
436 }
437 t4_free_mem(adap->tc_u32);
438}
439
440struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
441 unsigned int size)
442{
443 struct cxgb4_tc_u32_table *t;
444 unsigned int i;
445
446 if (!size)
447 return NULL;
448
449 t = t4_alloc_mem(sizeof(*t) +
450 (size * sizeof(struct cxgb4_link)));
451 if (!t)
452 return NULL;
453
454 t->size = size;
455
456 for (i = 0; i < t->size; i++) {
457 struct cxgb4_link *link = &t->table[i];
458 unsigned int bmap_size;
459 unsigned int max_tids;
460
461 max_tids = adap->tids.nftids;
462 bmap_size = BITS_TO_LONGS(max_tids);
463 link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
464 if (!link->tid_map)
465 goto out_no_mem;
466 bitmap_zero(link->tid_map, max_tids);
467 }
468
469 return t;
470
471out_no_mem:
472 for (i = 0; i < t->size; i++) {
473 struct cxgb4_link *link = &t->table[i];
474
475 if (link->tid_map)
476 t4_free_mem(link->tid_map);
477 }
478
479 if (t)
480 t4_free_mem(t);
481
482 return NULL;
483}
484