/* Driver for Solarflare network controllers and boards - RX path.
 *
 * Receive buffer and descriptor ring management for the sfc driver:
 * page allocation and recycling, DMA mapping, GRO/skb delivery, and
 * accelerated RFS (ARFS) filter steering.
 */
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of RX descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX
 * page recycle ring, this number is divided by the number of buffers
 * per page to calculate the number of pages to store in the ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Maximum number of descriptors that a scattered packet can span */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

/* Read the RSS hash that the NIC placed in the packet prefix */
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;

	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

/* Work out the buffer step size and how many buffers fit in each page */
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
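
/* Check the RX page recycle ring for a page that can be reused. */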
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned int index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}
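
/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 * @rx_queue:	Efx RX queue
 * @atomic:	perform atomic allocations if true
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  Returns a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */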
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned int index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
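
/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */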
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
				struct efx_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}
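
/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used
 * in the descriptor ring and appearing in the recycle ring simultaneously.
 */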
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}
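
/* Recycle the pages that are used by buffers that have just been received. */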
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
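
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:	RX descriptor queue
 * @atomic:	perform atomic allocations if true
 *
 * This will aim to fill the RX descriptor queue up to
 * rx_queue->max_fill.  If there is insufficient memory to do so, a
 * slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */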
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
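
/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */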
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}
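
/* Allocate and construct an SKB around page fragments */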
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}
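
/* Handle a received packet.  Second half: Touches packet payload. */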
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	struct flow_keys fk;
	int rc;

	if (flow_id == RPS_FLOW_ID_INVALID)
		return -EINVAL;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	spec.ether_type = fk.basic.n_proto;
	spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		spec.rem_host[0] = fk.addrs.v4addrs.src;
		spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	spec.rem_port = fk.ports.src;
	spec.loc_port = fk.ports.dst;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	channel = efx_get_channel(efx, rxq_index);
	channel->rps_flow_id[rc] = flow_id;
	++channel->rfs_filters_added;

	if (spec.ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);

	return rc;
}

bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int channel_idx, index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	channel_idx = efx->rps_expire_channel;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		struct efx_channel *channel = efx_get_channel(efx, channel_idx);

		flow_id = channel->rps_flow_id[index];
		if (flow_id != RPS_FLOW_ID_INVALID &&
		    expire_one(efx, flow_id, index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [queue %u flow %u]\n",
				   index, channel_idx, flow_id);
			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
		}
		if (++index == size) {
			/* Wrap to the next channel's flow table. */
			if (++channel_idx == efx->n_channels)
				channel_idx = 0;
			index = 0;
		}
	}
	efx->rps_expire_channel = channel_idx;
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}
968