1
2
3
4
5
6
7
8
9
10#include <linux/in.h>
11#include <net/ip.h>
12#include "efx.h"
13#include "filter.h"
14#include "io.h"
15#include "nic.h"
16#include "regs.h"
17
18
19
20
21
/* Fudge factors added on top of the deepest used search depth when
 * programming the hardware search limits (see efx_filter_push_rx_config()
 * and efx_filter_push_tx_limits()).
 */
#define FILTER_CTL_SRCH_FUDGE_WILD 3
#define FILTER_CTL_SRCH_FUDGE_FULL 1

/* Hard limit on the probe-sequence length for a normal-priority filter
 * insertion (see efx_filter_insert_filter()).
 */
#define FILTER_CTL_SRCH_MAX 200

/* Shorter probe limit for hint-priority filters (e.g. accelerated RFS),
 * so speculative filters cannot grow the search depth much.
 */
#define FILTER_CTL_SRCH_HINT_MAX 5
34
/* Identifies each filter table managed by this module. */
enum efx_filter_table_id {
	EFX_FILTER_TABLE_RX_IP = 0,	/* RX IP 4-tuple/2-tuple filters */
	EFX_FILTER_TABLE_RX_MAC,	/* RX MAC + VLAN filters */
	EFX_FILTER_TABLE_RX_DEF,	/* RX default (no-match) filters */
	EFX_FILTER_TABLE_TX_MAC,	/* TX MAC + VLAN filters */
	EFX_FILTER_TABLE_COUNT,
};
42
/* Fixed slots in the RX_DEF table: one unicast and one multicast default
 * filter.  The final enumerator doubles as the table size.
 */
enum efx_filter_index {
	EFX_FILTER_INDEX_UC_DEF,
	EFX_FILTER_INDEX_MC_DEF,
	EFX_FILTER_SIZE_RX_DEF,
};
48
/**
 * struct efx_filter_table - software state for one filter table
 * @id: which table this is (enum efx_filter_table_id)
 * @offset: base register address of the table
 *	(0 for RX_DEF, which has no table registers)
 * @size: number of entries; 0 if this table is absent on this NIC
 * @step: register address stride between entries
 *	(0 for RX_DEF; used to skip it in efx_restore_filters())
 * @used: number of entries currently in use
 * @used_bitmap: one bit per entry, set while the entry is in use
 * @spec: software shadow of each entry's filter specification
 * @search_depth: deepest probe-sequence position in use, per filter type
 */
struct efx_filter_table {
	enum efx_filter_table_id id;
	u32 offset;
	unsigned size;
	unsigned step;
	unsigned used;
	unsigned long *used_bitmap;
	struct efx_filter_spec *spec;
	unsigned search_depth[EFX_FILTER_TYPE_COUNT];
};
59
/* Per-NIC filter state.  @lock protects all tables and, when RFS is
 * enabled, the flow-id array that parallels the RX_IP table.
 */
struct efx_filter_state {
	spinlock_t lock;
	struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
#ifdef CONFIG_RFS_ACCEL
	u32 *rps_flow_id;		/* flow id per RX_IP table entry */
	unsigned rps_expire_index;	/* next entry to scan for expiry */
#endif
};
68
/* Forward declaration: entry clearing is needed by the insert path. */
static void efx_filter_table_clear_entry(struct efx_nic *efx,
					 struct efx_filter_table *table,
					 unsigned int filter_idx);
72
73
74
75static u16 efx_filter_hash(u32 key)
76{
77 u16 tmp;
78
79
80 tmp = 0x1fff ^ key >> 16;
81 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
82 tmp = tmp ^ tmp >> 9;
83
84 tmp = tmp ^ tmp << 13 ^ key;
85 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
86 return tmp ^ tmp >> 9;
87}
88
89
90
91static u16 efx_filter_increment(u32 key)
92{
93 return key * 2 - 1;
94}
95
/* Map a filter spec to its table id.  The filter type enumeration is
 * laid out so that (type >> 2) yields the RX table id; the BUILD_BUG_ONs
 * verify that layout.  TX MAC filters reuse the MAC type values and are
 * distinguished by EFX_FILTER_FLAG_TX, which offsets the id by 2.
 */
static enum efx_filter_table_id
efx_filter_spec_table_id(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
	EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}
109
110static struct efx_filter_table *
111efx_filter_spec_table(struct efx_filter_state *state,
112 const struct efx_filter_spec *spec)
113{
114 if (spec->type == EFX_FILTER_UNSPEC)
115 return NULL;
116 else
117 return &state->table[efx_filter_spec_table_id(spec)];
118}
119
/* Forget the recorded search depths; called once a table is empty so the
 * hardware search limits can shrink back on the next config push.
 */
static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
{
	memset(table->search_depth, 0, sizeof(table->search_depth));
}
124
/* Write the current RX filter configuration to the RX_FILTER_CTL
 * register: per-type search limits (deepest used entry plus a fudge
 * factor), and — when the RX_DEF table exists — the default queue,
 * RSS and scatter settings for unmatched packets.
 */
static void efx_filter_push_rx_config(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table;
	efx_oword_t filter_ctl;

	/* Read-modify-write: preserve unrelated fields of the register */
	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_TCP_FULL] +
			    FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_TCP_WILD] +
			    FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_UDP_FULL] +
			    FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_UDP_WILD] +
			    FILTER_CTL_SRCH_FUDGE_WILD);

	/* MAC table exists only on some NIC revisions (size == 0 otherwise) */
	table = &state->table[EFX_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_depth[EFX_FILTER_MAC_FULL] +
			FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_depth[EFX_FILTER_MAC_WILD] +
			FILTER_CTL_SRCH_FUDGE_WILD);
	}

	table = &state->table[EFX_FILTER_TABLE_RX_DEF];
	if (table->size) {
		/* Default (no-match) queue/RSS selection for UC and MC */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
			table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
			table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));

		/* There is a single no-match scatter enable bit, so only
		 * enable it if both default filters request scatter.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
			   table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_SCATTER));
	} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* No RX_DEF table on this NIC; drive the no-match scatter
		 * enable directly from the global rx_scatter setting.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			efx->rx_scatter);
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
198
/* Write the TX MAC filter search limits (deepest used entry plus fudge)
 * to the TX_CFG register, if the TX MAC table exists on this NIC.
 */
static void efx_filter_push_tx_limits(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table;
	efx_oword_t tx_cfg;

	/* Read-modify-write: preserve unrelated fields of the register */
	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EFX_FILTER_TABLE_TX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
			table->search_depth[EFX_FILTER_MAC_FULL] +
			FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
			table->search_depth[EFX_FILTER_MAC_WILD] +
			FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}
221
/* Pack two host/port pairs into the 96-bit spec->data[] layout expected
 * by the hardware, converting to host byte order:
 *   data[0] = host1[15:0] : port1
 *   data[1] = port2 : host1[31:16]
 *   data[2] = host2
 */
static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
					 __be32 host1, __be16 port1,
					 __be32 host2, __be16 port2)
{
	spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
	spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
	spec->data[2] = ntohl(host2);
}
230
/* Inverse of __efx_filter_set_ipv4(): unpack spec->data[] back into two
 * network-byte-order host/port pairs.
 */
static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
					 __be32 *host1, __be16 *port1,
					 __be32 *host2, __be16 *port2)
{
	*host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
	*port1 = htons(spec->data[0]);
	*host2 = htonl(spec->data[2]);
	*port2 = htons(spec->data[1] >> 16);
}
240
241
242
243
244
245
246
247
248int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
249 __be32 host, __be16 port)
250{
251 __be32 host1;
252 __be16 port1;
253
254 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
255
256
257 if (spec->type != EFX_FILTER_UNSPEC)
258 return -EPROTONOSUPPORT;
259
260 if (port == 0)
261 return -EINVAL;
262
263 switch (proto) {
264 case IPPROTO_TCP:
265 spec->type = EFX_FILTER_TCP_WILD;
266 break;
267 case IPPROTO_UDP:
268 spec->type = EFX_FILTER_UDP_WILD;
269 break;
270 default:
271 return -EPROTONOSUPPORT;
272 }
273
274
275
276
277
278
279 host1 = 0;
280 if (proto != IPPROTO_UDP) {
281 port1 = 0;
282 } else {
283 port1 = port;
284 port = 0;
285 }
286
287 __efx_filter_set_ipv4(spec, host1, port1, host, port);
288 return 0;
289}
290
291int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
292 u8 *proto, __be32 *host, __be16 *port)
293{
294 __be32 host1;
295 __be16 port1;
296
297 switch (spec->type) {
298 case EFX_FILTER_TCP_WILD:
299 *proto = IPPROTO_TCP;
300 __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
301 return 0;
302 case EFX_FILTER_UDP_WILD:
303 *proto = IPPROTO_UDP;
304 __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
305 return 0;
306 default:
307 return -EINVAL;
308 }
309}
310
311
312
313
314
315
316
317
318
319
320int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
321 __be32 host, __be16 port,
322 __be32 rhost, __be16 rport)
323{
324 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
325
326
327 if (spec->type != EFX_FILTER_UNSPEC)
328 return -EPROTONOSUPPORT;
329
330 if (port == 0 || rport == 0)
331 return -EINVAL;
332
333 switch (proto) {
334 case IPPROTO_TCP:
335 spec->type = EFX_FILTER_TCP_FULL;
336 break;
337 case IPPROTO_UDP:
338 spec->type = EFX_FILTER_UDP_FULL;
339 break;
340 default:
341 return -EPROTONOSUPPORT;
342 }
343
344 __efx_filter_set_ipv4(spec, rhost, rport, host, port);
345 return 0;
346}
347
348int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
349 u8 *proto, __be32 *host, __be16 *port,
350 __be32 *rhost, __be16 *rport)
351{
352 switch (spec->type) {
353 case EFX_FILTER_TCP_FULL:
354 *proto = IPPROTO_TCP;
355 break;
356 case EFX_FILTER_UDP_FULL:
357 *proto = IPPROTO_UDP;
358 break;
359 default:
360 return -EINVAL;
361 }
362
363 __efx_filter_get_ipv4(spec, rhost, rport, host, port);
364 return 0;
365}
366
367
368
369
370
371
372
373int efx_filter_set_eth_local(struct efx_filter_spec *spec,
374 u16 vid, const u8 *addr)
375{
376 EFX_BUG_ON_PARANOID(!(spec->flags &
377 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
378
379
380 if (spec->type != EFX_FILTER_UNSPEC)
381 return -EPROTONOSUPPORT;
382
383 if (vid == EFX_FILTER_VID_UNSPEC) {
384 spec->type = EFX_FILTER_MAC_WILD;
385 spec->data[0] = 0;
386 } else {
387 spec->type = EFX_FILTER_MAC_FULL;
388 spec->data[0] = vid;
389 }
390
391 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
392 spec->data[2] = addr[0] << 8 | addr[1];
393 return 0;
394}
395
396
397
398
399
400int efx_filter_set_uc_def(struct efx_filter_spec *spec)
401{
402 EFX_BUG_ON_PARANOID(!(spec->flags &
403 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
404
405 if (spec->type != EFX_FILTER_UNSPEC)
406 return -EINVAL;
407
408 spec->type = EFX_FILTER_UC_DEF;
409 memset(spec->data, 0, sizeof(spec->data));
410 return 0;
411}
412
413
414
415
416
417int efx_filter_set_mc_def(struct efx_filter_spec *spec)
418{
419 EFX_BUG_ON_PARANOID(!(spec->flags &
420 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
421
422 if (spec->type != EFX_FILTER_UNSPEC)
423 return -EINVAL;
424
425 spec->type = EFX_FILTER_MC_DEF;
426 memset(spec->data, 0, sizeof(spec->data));
427 return 0;
428}
429
/* Reset one RX default filter (UC or MC, selected by @filter_idx) to its
 * baseline state: manual priority, queue 0, RSS if more than one RX
 * channel, scatter per the global setting.  Also marks it as in use —
 * the default filters always exist while the table does.
 */
static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
	struct efx_filter_spec *spec = &table->spec[filter_idx];
	enum efx_filter_flags flags = 0;

	/* Spread unmatched traffic via RSS only when there is more than
	 * one RX channel to spread it over.
	 */
	if (efx->n_rx_channels > 1)
		flags |= EFX_FILTER_FLAG_RX_RSS;

	if (efx->rx_scatter)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;

	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
	/* filter_idx 0/1 maps to UC_DEF/MC_DEF type respectively */
	spec->type = EFX_FILTER_UC_DEF + filter_idx;
	table->used_bitmap[0] |= 1 << filter_idx;
}
450
451int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
452 u16 *vid, u8 *addr)
453{
454 switch (spec->type) {
455 case EFX_FILTER_MAC_WILD:
456 *vid = EFX_FILTER_VID_UNSPEC;
457 break;
458 case EFX_FILTER_MAC_FULL:
459 *vid = spec->data[0];
460 break;
461 default:
462 return -EINVAL;
463 }
464
465 addr[0] = spec->data[2] >> 8;
466 addr[1] = spec->data[2];
467 addr[2] = spec->data[1] >> 24;
468 addr[3] = spec->data[1] >> 16;
469 addr[4] = spec->data[1] >> 8;
470 addr[5] = spec->data[1];
471 return 0;
472}
473
474
/* Build the hardware filter word for @spec in *@filter and return the
 * 32-bit key used for hashing (the XOR of the match data and a small
 * per-table discriminator, data3).
 */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
	u32 data3;

	switch (efx_filter_spec_table_id(spec)) {
	case EFX_FILTER_TABLE_RX_IP: {
		/* TCP and UDP filters share the table; the TCP_UDP bit and
		 * the key discriminator separate them.
		 */
		bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
			       spec->type == EFX_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FILTER_TABLE_RX_MAC: {
		/* Full and wildcard MAC filters share the table */
		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	case EFX_FILTER_TABLE_TX_MAC: {
		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		/* TX filters also hash on the queue id */
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		/* RX_DEF filters have no hardware word; must not get here */
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
533
534static bool efx_filter_equal(const struct efx_filter_spec *left,
535 const struct efx_filter_spec *right)
536{
537 if (left->type != right->type ||
538 memcmp(left->data, right->data, sizeof(left->data)))
539 return false;
540
541 if (left->flags & EFX_FILTER_FLAG_TX &&
542 left->dmaq_id != right->dmaq_id)
543 return false;
544
545 return true;
546}
547
548
549
550
551
552
553
554
555
556
/* Number of distinct match-priority classes per direction (RX or TX);
 * used as the range stride when encoding filter IDs.
 */
#define EFX_FILTER_MATCH_PRI_COUNT 5

/* Match priority class per filter type: 0 is the most specific match.
 * The class selects the ID range, and the range maps back to a table
 * via efx_filter_range_table[].
 */
static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
	[EFX_FILTER_TCP_FULL]	= 0,
	[EFX_FILTER_UDP_FULL]	= 0,
	[EFX_FILTER_TCP_WILD]	= 1,
	[EFX_FILTER_UDP_WILD]	= 1,
	[EFX_FILTER_MAC_FULL]	= 2,
	[EFX_FILTER_MAC_WILD]	= 3,
	[EFX_FILTER_UC_DEF]	= 4,
	[EFX_FILTER_MC_DEF]	= 4,
};
569
/* Map an ID range (match-priority class, RX classes first then TX) back
 * to the owning table.  Ranges 5 and 6 are unused (no TX equivalents of
 * classes 0-1), hence TABLE_COUNT as an invalid marker.
 */
static const enum efx_filter_range_table[] = {
	EFX_FILTER_TABLE_RX_IP,		/* RX match pri 0 */
	EFX_FILTER_TABLE_RX_IP,		/* RX match pri 1 */
	EFX_FILTER_TABLE_RX_MAC,	/* RX match pri 2 */
	EFX_FILTER_TABLE_RX_MAC,	/* RX match pri 3 */
	EFX_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
	EFX_FILTER_TABLE_COUNT,		/* TX match pri 0; invalid */
	EFX_FILTER_TABLE_COUNT,		/* TX match pri 1; invalid */
	EFX_FILTER_TABLE_TX_MAC,	/* TX match pri 2 */
	EFX_FILTER_TABLE_TX_MAC,	/* TX match pri 3 */
};
581
/* Filter IDs encode the table index in the low 13 bits and the ID range
 * (match-priority class, RX then TX) above them.
 */
#define EFX_FILTER_INDEX_WIDTH 13
#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)

/* Encode a filter ID from the spec's match class and its table index. */
static inline u32
efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
{
	unsigned int range;

	range = efx_filter_type_match_pri[spec->type];
	/* TX ranges follow the RX ranges */
	if (!(spec->flags & EFX_FILTER_FLAG_RX))
		range += EFX_FILTER_MATCH_PRI_COUNT;

	return range << EFX_FILTER_INDEX_WIDTH | index;
}
596
597static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
598{
599 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
600
601 if (range < ARRAY_SIZE(efx_filter_range_table))
602 return efx_filter_range_table[range];
603 else
604 return EFX_FILTER_TABLE_COUNT;
605}
606
/* Decode the table index from a filter ID. */
static inline unsigned int efx_filter_id_index(u32 id)
{
	return id & EFX_FILTER_INDEX_MASK;
}
611
612static inline u8 efx_filter_id_flags(u32 id)
613{
614 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
615
616 if (range < EFX_FILTER_MATCH_PRI_COUNT)
617 return EFX_FILTER_FLAG_RX;
618 else
619 return EFX_FILTER_FLAG_TX;
620}
621
/* Return one greater than the largest RX filter ID this NIC can produce,
 * or 0 if no RX tables exist.  Walks the RX ID ranges from highest to
 * lowest and stops at the first whose table is present.
 */
u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
	enum efx_filter_table_id table_id;

	do {
		table_id = efx_filter_range_table[range];
		if (state->table[table_id].size != 0)
			/* Exclusive limit: range base plus table size */
			return range << EFX_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
/**
 * efx_filter_insert_filter - insert or replace a filter
 * @efx: NIC on which to insert the filter
 * @spec: specification for the filter
 * @replace_equal: replace an existing filter of equal priority
 *
 * On success returns a non-negative filter ID (see efx_filter_make_id());
 * on failure returns -EINVAL (no such table), -EBUSY (no free slot within
 * the search limit), -EEXIST (equal-priority duplicate and !@replace_equal)
 * or -EPERM (higher-priority duplicate exists).
 */
s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
			     bool replace_equal)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
	efx_oword_t filter;
	int rep_index, ins_index;
	unsigned int depth = 0;
	int rc;

	if (!table || table->size == 0)
		return -EINVAL;

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_depth=%d", __func__, spec->type,
		   table->search_depth[spec->type]);

	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
		/* RX_DEF has one fixed slot per type; no searching needed */
		BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
			     EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
		rep_index = spec->type - EFX_FILTER_UC_DEF;
		ins_index = rep_index;

		spin_lock_bh(&state->lock);
	} else {
		/* Double-hashed open addressing: start at hash(key), step
		 * by increment(key).  Walk the probe sequence looking for
		 * either an identical filter to replace (rep_index, bounded
		 * by the deepest existing filter of this type) or a free
		 * slot to insert into (ins_index, bounded by the hint or
		 * normal maximum depending on priority).
		 */
		u32 key = efx_filter_build(&filter, spec);
		unsigned int hash = efx_filter_hash(key);
		unsigned int incr = efx_filter_increment(key);
		unsigned int max_rep_depth = table->search_depth[spec->type];
		unsigned int max_ins_depth =
			spec->priority <= EFX_FILTER_PRI_HINT ?
			FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
		unsigned int i = hash & (table->size - 1);

		ins_index = -1;
		depth = 1;

		spin_lock_bh(&state->lock);

		for (;;) {
			if (!test_bit(i, table->used_bitmap)) {
				/* Remember the first free slot seen */
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_filter_equal(spec, &table->spec[i])) {
				/* Found an identical filter to replace */
				if (ins_index < 0)
					ins_index = i;
				rep_index = i;
				break;
			}

			if (depth >= max_rep_depth &&
			    (ins_index >= 0 || depth >= max_ins_depth)) {
				/* Both searches are exhausted */
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out;
				}
				rep_index = -1;
				break;
			}

			i = (i + incr) & (table->size - 1);
			++depth;
		}
	}

	/* If we found a filter to be replaced, check whether we
	 * may do so.
	 */
	if (rep_index >= 0) {
		struct efx_filter_spec *saved_spec = &table->spec[rep_index];

		if (spec->priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
			goto out;
		}
		if (spec->priority < saved_spec->priority) {
			rc = -EPERM;
			goto out;
		}
	}

	/* Insert the filter */
	if (ins_index != rep_index) {
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	}
	table->spec[ins_index] = *spec;

	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
		/* Default filters live in RX_FILTER_CTL, not a table row */
		efx_filter_push_rx_config(efx);
	} else {
		/* Grow the hardware search limit if this entry sits deeper
		 * than any previous filter of its type.
		 */
		if (table->search_depth[spec->type] < depth) {
			table->search_depth[spec->type] = depth;
			if (spec->flags & EFX_FILTER_FLAG_TX)
				efx_filter_push_tx_limits(efx);
			else
				efx_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * ins_index);

		/* If we replaced a filter by inserting at a different
		 * position, clear the old entry now.
		 */
		if (ins_index != rep_index && rep_index >= 0)
			efx_filter_table_clear_entry(efx, table, rep_index);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec->type, ins_index, spec->dmaq_id);
	rc = efx_filter_make_id(spec, ins_index);

out:
	spin_unlock_bh(&state->lock);
	return rc;
}
793
/* Clear a single filter table entry.  The RX default filters must always
 * exist, so for RX_DEF the entry is reset to its default state rather
 * than removed.  Caller holds state->lock.
 */
static void efx_filter_table_clear_entry(struct efx_nic *efx,
					 struct efx_filter_table *table,
					 unsigned int filter_idx)
{
	/* All-zeroes filter word, written to invalidate the hardware row */
	static efx_oword_t filter;

	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
		/* RX default filters must always exist */
		efx_filter_reset_rx_def(efx, filter_idx);
		efx_filter_push_rx_config(efx);
	} else if (test_bit(filter_idx, table->used_bitmap)) {
		__clear_bit(filter_idx, table->used_bitmap);
		--table->used;
		memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

		efx_writeo(efx, &filter,
			   table->offset + table->step * filter_idx);
	}
}
813
814
815
816
817
818
819
820
821
822
823int efx_filter_remove_id_safe(struct efx_nic *efx,
824 enum efx_filter_priority priority,
825 u32 filter_id)
826{
827 struct efx_filter_state *state = efx->filter_state;
828 enum efx_filter_table_id table_id;
829 struct efx_filter_table *table;
830 unsigned int filter_idx;
831 struct efx_filter_spec *spec;
832 u8 filter_flags;
833 int rc;
834
835 table_id = efx_filter_id_table_id(filter_id);
836 if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
837 return -ENOENT;
838 table = &state->table[table_id];
839
840 filter_idx = efx_filter_id_index(filter_id);
841 if (filter_idx >= table->size)
842 return -ENOENT;
843 spec = &table->spec[filter_idx];
844
845 filter_flags = efx_filter_id_flags(filter_id);
846
847 spin_lock_bh(&state->lock);
848
849 if (test_bit(filter_idx, table->used_bitmap) &&
850 spec->priority == priority) {
851 efx_filter_table_clear_entry(efx, table, filter_idx);
852 if (table->used == 0)
853 efx_filter_table_reset_search_depth(table);
854 rc = 0;
855 } else {
856 rc = -ENOENT;
857 }
858
859 spin_unlock_bh(&state->lock);
860
861 return rc;
862}
863
864
865
866
867
868
869
870
871
872
873
874int efx_filter_get_filter_safe(struct efx_nic *efx,
875 enum efx_filter_priority priority,
876 u32 filter_id, struct efx_filter_spec *spec_buf)
877{
878 struct efx_filter_state *state = efx->filter_state;
879 enum efx_filter_table_id table_id;
880 struct efx_filter_table *table;
881 struct efx_filter_spec *spec;
882 unsigned int filter_idx;
883 u8 filter_flags;
884 int rc;
885
886 table_id = efx_filter_id_table_id(filter_id);
887 if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
888 return -ENOENT;
889 table = &state->table[table_id];
890
891 filter_idx = efx_filter_id_index(filter_id);
892 if (filter_idx >= table->size)
893 return -ENOENT;
894 spec = &table->spec[filter_idx];
895
896 filter_flags = efx_filter_id_flags(filter_id);
897
898 spin_lock_bh(&state->lock);
899
900 if (test_bit(filter_idx, table->used_bitmap) &&
901 spec->priority == priority) {
902 *spec_buf = *spec;
903 rc = 0;
904 } else {
905 rc = -ENOENT;
906 }
907
908 spin_unlock_bh(&state->lock);
909
910 return rc;
911}
912
/* Remove every entry in @table_id whose priority is less than or equal
 * to @priority.  efx_filter_table_clear_entry() ignores unused slots, so
 * no used-bit check is needed here.
 */
static void efx_filter_table_clear(struct efx_nic *efx,
				   enum efx_filter_table_id table_id,
				   enum efx_filter_priority priority)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	spin_lock_bh(&state->lock);

	for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
		if (table->spec[filter_idx].priority <= priority)
			efx_filter_table_clear_entry(efx, table, filter_idx);
	/* Let the hardware search limits shrink once empty */
	if (table->used == 0)
		efx_filter_table_reset_search_depth(table);

	spin_unlock_bh(&state->lock);
}
931
932
933
934
935
936
/**
 * efx_filter_clear_rx - remove RX filters by priority
 * @efx: NIC from which to remove the filters
 * @priority: Maximum priority to remove
 *
 * Clears the RX IP and RX MAC tables; the RX default table is not
 * touched here.
 */
void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
{
	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
}
942
943u32 efx_filter_count_rx_used(struct efx_nic *efx,
944 enum efx_filter_priority priority)
945{
946 struct efx_filter_state *state = efx->filter_state;
947 enum efx_filter_table_id table_id;
948 struct efx_filter_table *table;
949 unsigned int filter_idx;
950 u32 count = 0;
951
952 spin_lock_bh(&state->lock);
953
954 for (table_id = EFX_FILTER_TABLE_RX_IP;
955 table_id <= EFX_FILTER_TABLE_RX_DEF;
956 table_id++) {
957 table = &state->table[table_id];
958 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
959 if (test_bit(filter_idx, table->used_bitmap) &&
960 table->spec[filter_idx].priority == priority)
961 ++count;
962 }
963 }
964
965 spin_unlock_bh(&state->lock);
966
967 return count;
968}
969
/* Fill @buf with the IDs of all in-use RX filters of exactly @priority.
 * Returns the number of IDs written, or -EMSGSIZE if more than @size
 * filters match.
 */
s32 efx_filter_get_rx_ids(struct efx_nic *efx,
			  enum efx_filter_priority priority,
			  u32 *buf, u32 size)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&state->lock);

	/* Walk every RX table (IP, MAC, default) */
	for (table_id = EFX_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}
out:
	spin_unlock_bh(&state->lock);

	return count;
}
1003
1004
/* Rewrite all software-shadowed filters to the hardware, e.g. after a
 * device reset, then push the control-register configuration.
 */
void efx_restore_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&state->lock);

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* step == 0 means no per-entry registers (the RX_DEF
		 * table); its state is restored via the config push below.
		 */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_filter_push_rx_config(efx);
	efx_filter_push_tx_limits(efx);

	spin_unlock_bh(&state->lock);
}
1036
/* Allocate and initialise the filter state for this NIC.  Which tables
 * exist depends on the hardware revision; absent tables keep size == 0.
 * Returns 0 or -ENOMEM (all partial allocations are freed on failure).
 */
int efx_probe_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state;
	struct efx_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;

	spin_lock_init(&state->lock);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
#ifdef CONFIG_RFS_ACCEL
		/* One flow id per RX_IP table entry, for accelerated RFS */
		state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
					     sizeof(*state->rps_flow_id),
					     GFP_KERNEL);
		if (!state->rps_flow_id)
			goto fail;
#endif
		table = &state->table[EFX_FILTER_TABLE_RX_IP];
		table->id = EFX_FILTER_TABLE_RX_IP;
		table->offset = FR_BZ_RX_FILTER_TBL0;
		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
	}

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		table = &state->table[EFX_FILTER_TABLE_RX_MAC];
		table->id = EFX_FILTER_TABLE_RX_MAC;
		table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
		table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;

		/* RX_DEF has no registers; offset/step stay 0 */
		table = &state->table[EFX_FILTER_TABLE_RX_DEF];
		table->id = EFX_FILTER_TABLE_RX_DEF;
		table->size = EFX_FILTER_SIZE_RX_DEF;

		table = &state->table[EFX_FILTER_TABLE_TX_MAC];
		table->id = EFX_FILTER_TABLE_TX_MAC;
		table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
		table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
	}

	/* Allocate the used bitmap and spec shadow for each present table */
	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(table->size * sizeof(*table->spec));
		if (!table->spec)
			goto fail;
	}

	if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
		/* The default filters always exist; initialise them */
		unsigned i;
		for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
			efx_filter_reset_rx_def(efx, i);
	}

	efx_filter_push_rx_config(efx);

	return 0;

fail:
	/* Frees everything allocated above, including partial state */
	efx_remove_filters(efx);
	return -ENOMEM;
}
1112
/* Free all filter state allocated by efx_probe_filters().  Safe to call
 * on partially-initialised state (kfree/vfree accept NULL).
 */
void efx_remove_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
#ifdef CONFIG_RFS_ACCEL
	kfree(state->rps_flow_id);
#endif
	kfree(state);
}
1127
1128
/* Propagate a change of efx->rx_scatter to every in-use RX filter that
 * steers to a valid RX channel, rewriting the affected hardware rows and
 * then the control-register configuration.
 */
void efx_filter_update_rx_scatter(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&state->lock);

	for (table_id = EFX_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			/* Skip unused entries and filters steering to
			 * queues beyond the current RX channel count.
			 */
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
				continue;

			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
					EFX_FILTER_FLAG_RX_SCATTER;
			else
				table->spec[filter_idx].flags &=
					~EFX_FILTER_FLAG_RX_SCATTER;

			if (table_id == EFX_FILTER_TABLE_RX_DEF)
				/* No hardware row; handled by the config
				 * push below.
				 */
				continue;

			efx_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	/* Update the default-filter scatter setting too */
	efx_filter_push_rx_config(efx);

	spin_unlock_bh(&state->lock);
}
1171
1172#ifdef CONFIG_RFS_ACCEL
1173
1174int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
1175 u16 rxq_index, u32 flow_id)
1176{
1177 struct efx_nic *efx = netdev_priv(net_dev);
1178 struct efx_channel *channel;
1179 struct efx_filter_state *state = efx->filter_state;
1180 struct efx_filter_spec spec;
1181 const struct iphdr *ip;
1182 const __be16 *ports;
1183 int nhoff;
1184 int rc;
1185
1186 nhoff = skb_network_offset(skb);
1187
1188 if (skb->protocol == htons(ETH_P_8021Q)) {
1189 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
1190 nhoff + sizeof(struct vlan_hdr));
1191 if (((const struct vlan_hdr *)skb->data + nhoff)->
1192 h_vlan_encapsulated_proto != htons(ETH_P_IP))
1193 return -EPROTONOSUPPORT;
1194
1195
1196
1197
1198
1199 nhoff += sizeof(struct vlan_hdr);
1200 } else if (skb->protocol != htons(ETH_P_IP)) {
1201 return -EPROTONOSUPPORT;
1202 }
1203
1204
1205 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
1206 ip = (const struct iphdr *)(skb->data + nhoff);
1207 if (ip_is_fragment(ip))
1208 return -EPROTONOSUPPORT;
1209 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
1210 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
1211
1212 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
1213 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
1214 rxq_index);
1215 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
1216 ip->daddr, ports[1], ip->saddr, ports[0]);
1217 if (rc)
1218 return rc;
1219
1220 rc = efx_filter_insert_filter(efx, &spec, true);
1221 if (rc < 0)
1222 return rc;
1223
1224
1225 state->rps_flow_id[rc] = flow_id;
1226 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
1227 ++channel->rfs_filters_added;
1228
1229 netif_info(efx, rx_status, efx->net_dev,
1230 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
1231 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
1232 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
1233 rxq_index, flow_id, rc);
1234
1235 return rc;
1236}
1237
/* Scan up to @quota RX_IP entries for hint (ARFS) filters whose flows
 * the stack reports as idle, and remove them.  The scan position wraps
 * and persists in rps_expire_index so successive calls cover the whole
 * table.  Returns false (without scanning) if the lock is contended.
 */
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
	unsigned mask = table->size - 1;
	unsigned index;
	unsigned stop;

	/* Best-effort: called periodically, so skip if busy */
	if (!spin_trylock_bh(&state->lock))
		return false;

	index = state->rps_expire_index;
	stop = (index + quota) & mask;

	while (index != stop) {
		if (test_bit(index, table->used_bitmap) &&
		    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
		    rps_may_expire_flow(efx->net_dev,
					table->spec[index].dmaq_id,
					state->rps_flow_id[index], index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expiring filter %d [flow %u]\n",
				   index, state->rps_flow_id[index]);
			efx_filter_table_clear_entry(efx, table, index);
		}
		index = (index + 1) & mask;
	}

	state->rps_expire_index = stop;
	/* Let the hardware search limits shrink once empty */
	if (table->used == 0)
		efx_filter_table_reset_search_depth(table);

	spin_unlock_bh(&state->lock);
	return true;
}
1273
1274#endif
1275