#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of RX buffers to refill at once */
#define EF4_RX_PREFERRED_BATCH 8U

/* Size of the page recycle ring, in buffers.  A larger ring is used when an
 * IOMMU is present (see ef4_init_rx_recycle_ring()).
 */
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)

/* Maximum number of packet header bytes copied into the skb linear area */
#define EF4_SKB_HEADERS 128u

/* Module parameter: percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring (0 selects the default trigger).
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
                                      EF4_RX_USR_BUF_SIZE)

/* RX descriptor ring head room required: one slot plus enough for a
 * maximally-fragmented packet.
 */
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)

static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{
        return page_address(buf->page) + buf->page_offset;
}

static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
        const u8 *data = eh + efx->rx_packet_hash_offset;

        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}

static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
        if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
                return ef4_rx_buffer(rx_queue, 0);
        else
                return rx_buf + 1;
}

static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
                                      struct ef4_rx_buffer *rx_buf,
                                      unsigned int len)
{
        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
                                DMA_FROM_DEVICE);
}

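/* Work out how RX buffers are laid out within each DMA-mapped page: the step
 * between buffers, how many buffers fit in a page, and the truesize charged
 * to each skb for one buffer.
 */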
void ef4_rx_config_page_split(struct ef4_nic *efx)
{
        efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
                                      EF4_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
                 efx->rx_page_buf_step);
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
                                               efx->rx_bufs_per_page);
}

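/* Try to take a page from the RX recycle ring.  Returns the page with its
 * DMA mapping still valid, or NULL if the slot is empty or the page is still
 * referenced elsewhere (in which case it is unmapped and released here).
 */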
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{
        struct ef4_nic *efx = rx_queue->efx;
        struct page *page;
        struct ef4_rx_page_state *state;
        unsigned index;

        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
                return NULL;

        rx_queue->page_ring[index] = NULL;
        /* page_remove cannot exceed page_add. */
        if (rx_queue->page_remove != rx_queue->page_add)
                ++rx_queue->page_remove;

        /* If page_count is 1 then we hold the only reference to this page. */
        if (page_count(page) == 1) {
                ++rx_queue->page_recycle_count;
                return page;
        } else {
                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
                ++rx_queue->page_recycle_failed;
        }

        return NULL;
}

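/**
 * ef4_init_rx_buffers - create page-based RX buffers
 * @rx_queue:	RX descriptor queue
 * @atomic:	perform atomic allocations if true
 *
 * Allocates (or reuses from the recycle ring) a batch of pages, maps them
 * for DMA and populates a struct ef4_rx_buffer for each buffer carved out
 * of them.  A page is either filled with as many buffers as fit, or not
 * used at all.  Returns 0 on success or a negative error code.
 */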
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
        struct ef4_nic *efx = rx_queue->efx;
        struct ef4_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct ef4_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        count = 0;
        do {
                page = ef4_reuse_page(rx_queue);
                if (page == NULL) {
                        page = alloc_pages(__GFP_COMP |
                                           (atomic ? GFP_ATOMIC : GFP_KERNEL),
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
                        dma_addr =
                                dma_map_page(&efx->pci_dev->dev, page, 0,
                                             PAGE_SIZE << efx->rx_buffer_order,
                                             DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                                       dma_addr))) {
                                __free_pages(page, efx->rx_buffer_order);
                                return -EIO;
                        }
                        state = page_address(page);
                        state->dma_addr = dma_addr;
                } else {
                        state = page_address(page);
                        dma_addr = state->dma_addr;
                }

                dma_addr += sizeof(struct ef4_rx_page_state);
                page_offset = sizeof(struct ef4_rx_page_state);

                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = ef4_rx_buffer(rx_queue, index);
                        rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
                        rx_buf->page = page;
                        rx_buf->page_offset = page_offset + efx->rx_ip_align;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
                        get_page(page);
                        dma_addr += efx->rx_page_buf_step;
                        page_offset += efx->rx_page_buf_step;
                } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

                rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
        } while (++count < efx->rx_pages_per_batch);

        return 0;
}

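/* Unmap a DMA-mapped page.  This is only called for the final RX buffer in
 * a page, i.e. the buffer carrying EF4_RX_BUF_LAST_IN_PAGE.
 */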
static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
                                struct ef4_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;

        if (page) {
                struct ef4_rx_page_state *state = page_address(page);

                dma_unmap_page(&efx->pci_dev->dev,
                               state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
        }
}

static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
                                struct ef4_rx_buffer *rx_buf,
                                unsigned int num_bufs)
{
        do {
                if (rx_buf->page) {
                        put_page(rx_buf->page);
                        rx_buf->page = NULL;
                }
                rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
        } while (--num_bufs);
}

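/* Attempt to recycle the page into the RX recycle ring.  The page is only
 * added when the final buffer in it is processed, which prevents a page
 * appearing in the descriptor ring and the recycle ring at the same time.
 */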
static void ef4_recycle_rx_page(struct ef4_channel *channel,
                                struct ef4_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;
        struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
        struct ef4_nic *efx = rx_queue->efx;
        unsigned index;

        /* Only recycle the page after processing the final buffer. */
        if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
                return;

        index = rx_queue->page_add & rx_queue->page_ptr_mask;
        if (rx_queue->page_ring[index] == NULL) {
                unsigned read_index = rx_queue->page_remove &
                        rx_queue->page_ptr_mask;

                /* The next slot in the recycle ring is available, but
                 * increment page_remove if the read pointer currently
                 * points here.
                 */
                if (read_index == index)
                        ++rx_queue->page_remove;
                rx_queue->page_ring[index] = page;
                ++rx_queue->page_add;
                return;
        }
        ++rx_queue->page_recycle_full;
        ef4_unmap_rx_buffer(efx, rx_buf);
        put_page(rx_buf->page);
}

static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
                               struct ef4_rx_buffer *rx_buf)
{
        /* Release the page reference we hold for the buffer. */
        if (rx_buf->page)
                put_page(rx_buf->page);

        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
                ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
                ef4_free_rx_buffers(rx_queue, rx_buf, 1);
        }
        rx_buf->page = NULL;
}

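/* Recycle the pages that are used by buffers that have just been received. */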
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
                                 struct ef4_rx_buffer *rx_buf,
                                 unsigned int n_frags)
{
        struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

        do {
                ef4_recycle_rx_page(channel, rx_buf);
                rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

static void ef4_discard_rx_packet(struct ef4_channel *channel,
                                  struct ef4_rx_buffer *rx_buf,
                                  unsigned int n_frags)
{
        struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

        ef4_recycle_rx_pages(channel, rx_buf, n_frags);

        ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

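/**
 * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:	RX descriptor queue
 * @atomic:	perform atomic allocations if true
 *
 * This aims to fill the RX descriptor queue up to @rx_queue->max_fill.
 * If there is insufficient memory to do so and the queue would otherwise
 * run dry, a slow fill is scheduled instead.  No internal locking is used,
 * so the caller must serialise calls (typically the NAPI poll path).
 */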
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
        struct ef4_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
        int space, rc = 0;

        if (!rx_queue->refill_enabled)
                return;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        space = rx_queue->max_fill - fill_level;
        EF4_BUG_ON_PARANOID(space < batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d\n",
                   ef4_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill);

        do {
                rc = ef4_init_rx_buffers(rx_queue, atomic);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                ef4_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= batch_size) >= batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", ef4_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

 out:
        if (rx_queue->notified_count != rx_queue->added_count)
                ef4_nic_notify_rx_desc(rx_queue);
}

void ef4_rx_slow_fill(struct timer_list *t)
{
        struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

        /* Post an event to cause NAPI to run and refill the queue */
        ef4_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
                                     struct ef4_rx_buffer *rx_buf,
                                     int len)
{
        struct ef4_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        rx_buf->flags |= EF4_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  "RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  ef4_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  "RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  ef4_rx_queue_index(rx_queue), len, max_len);
        }

        ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

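/* Pass a received packet up through GRO, using page fragments rather than a
 * linear skb.
 */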
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
                  unsigned int n_frags, u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
        struct ef4_nic *efx = channel->efx;
        struct sk_buff *skb;

        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
                struct ef4_rx_queue *rx_queue;

                rx_queue = ef4_channel_get_rx_queue(channel);
                ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }

        if (efx->net_dev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
                             PKT_HASH_TYPE_L3);
        skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
                          CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   rx_buf->page, rx_buf->page_offset,
                                   rx_buf->len);
                rx_buf->page = NULL;
                skb->len += rx_buf->len;
                if (skb_shinfo(skb)->nr_frags == n_frags)
                        break;

                rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
        }

        skb->data_len = skb->len;
        skb->truesize += n_frags * efx->rx_buffer_truesize;

        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        gro_result = napi_gro_frags(napi);
        if (gro_result != GRO_DROP)
                channel->irq_mod_score += 2;
}

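/* Allocate and construct an skb around the received page fragments, copying
 * at most hdr_len bytes of headers into the linear area.
 */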
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
                                     struct ef4_rx_buffer *rx_buf,
                                     unsigned int n_frags,
                                     u8 *eh, int hdr_len)
{
        struct ef4_nic *efx = channel->efx;
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev,
                               efx->rx_ip_align + efx->rx_prefix_size +
                               hdr_len);
        if (unlikely(skb == NULL)) {
                atomic_inc(&efx->n_rx_noskb_drops);
                return NULL;
        }

        EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
               efx->rx_prefix_size + hdr_len);
        skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
        __skb_put(skb, hdr_len);

        /* Append the remaining page(s) onto the frag list */
        if (rx_buf->len > hdr_len) {
                rx_buf->page_offset += hdr_len;
                rx_buf->len -= hdr_len;

                for (;;) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buf->page, rx_buf->page_offset,
                                           rx_buf->len);
                        rx_buf->page = NULL;
                        skb->len += rx_buf->len;
                        skb->data_len += rx_buf->len;
                        if (skb_shinfo(skb)->nr_frags == n_frags)
                                break;

                        rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
                }
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
                n_frags = 0;
        }

        skb->truesize += n_frags * efx->rx_buffer_truesize;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        skb_mark_napi_id(skb, &channel->napi_str);

        return skb;
}

void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags)
{
        struct ef4_nic *efx = rx_queue->efx;
        struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
        struct ef4_rx_buffer *rx_buf;

        rx_queue->rx_packets++;

        rx_buf = ef4_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* Validate the number of fragments and completed length */
        if (n_frags == 1) {
                if (!(flags & EF4_RX_PKT_PREFIX_LEN))
                        ef4_rx_packet__check_len(rx_queue, rx_buf, len);
        } else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
                   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
                   unlikely(len > n_frags * efx->rx_dma_len) ||
                   unlikely(!efx->rx_scatter)) {
                /* If this isn't an explicit discard request, either
                 * the hardware or the driver is broken.
                 */
                WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
                rx_buf->flags |= EF4_RX_PKT_DISCARD;
        }

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received ids %x-%x len %d %s%s\n",
                   ef4_rx_queue_index(rx_queue), index,
                   (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so.  Process the whole
         * batch at once.
         */
        if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
                ef4_rx_flush_packet(channel);
                ef4_discard_rx_packet(channel, rx_buf, n_frags);
                return;
        }

        if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
                rx_buf->len = len;

        /* Release and/or sync the DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue.
         */
        ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(ef4_rx_buf_va(rx_buf));

        rx_buf->page_offset += efx->rx_prefix_size;
        rx_buf->len -= efx->rx_prefix_size;

        if (n_frags > 1) {
                /* Release/sync DMA mapping for all fragments.
                 * Fix length for last fragment.
                 */
                unsigned int tail_frags = n_frags - 1;

                for (;;) {
                        rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
                        if (--tail_frags == 0)
                                break;
                        ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
                }
                rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
                ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
        }

        /* All fragments have been DMA-synced, so recycle pages. */
        rx_buf = ef4_rx_buffer(rx_queue, index);
        ef4_recycle_rx_pages(channel, rx_buf, n_frags);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        ef4_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = n_frags;
        channel->rx_pkt_index = index;
}

static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
                           struct ef4_rx_buffer *rx_buf,
                           unsigned int n_frags)
{
        struct sk_buff *skb;
        u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);

        skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
                struct ef4_rx_queue *rx_queue;

                rx_queue = ef4_channel_get_rx_queue(channel);
                ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
        if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        return;

        /* Pass the packet up */
        netif_receive_skb(skb);
}

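/* Handle a received packet, second half: runs after ef4_rx_packet() via
 * ef4_rx_flush_packet() and touches the packet payload.
 */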
void __ef4_rx_packet(struct ef4_channel *channel)
{
        struct ef4_nic *efx = channel->efx;
        struct ef4_rx_buffer *rx_buf =
                ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
        u8 *eh = ef4_rx_buf_va(rx_buf);

        /* Read length from the prefix if necessary.  This already
         * excludes the length of the prefix itself.
         */
        if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
                rx_buf->len = le16_to_cpup((__le16 *)
                                           (eh + efx->rx_packet_len_offset));

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                struct ef4_rx_queue *rx_queue;

                ef4_loopback_rx_packet(efx, eh, rx_buf->len);
                rx_queue = ef4_channel_get_rx_queue(channel);
                ef4_free_rx_buffers(rx_queue, rx_buf,
                                    channel->rx_pkt_n_frags);
                goto out;
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;

        if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
                ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
        channel->rx_pkt_n_frags = 0;
}

int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{
        struct ef4_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
        EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  ef4_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = ef4_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }

        return rc;
}

static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
                                     struct ef4_rx_queue *rx_queue)
{
        unsigned int bufs_in_recycle_ring, page_ring_size;

        /* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
        bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
        if (iommu_present(&pci_bus_type))
                bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
        else
                bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

        page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
        rx_queue->page_ptr_mask = page_ring_size - 1;
}

void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{
        struct ef4_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        ef4_init_rx_recycle_ring(efx, rx_queue);

        rx_queue->page_remove = 0;
        rx_queue->page_add = rx_queue->page_ptr_mask + 1;
        rx_queue->page_recycle_count = 0;
        rx_queue->page_recycle_failed = 0;
        rx_queue->page_recycle_full = 0;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
        max_trigger =
                max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->refill_enabled = true;

        /* Set up RX descriptor ring */
        ef4_nic_init_rx(rx_queue);
}

void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{
        int i;
        struct ef4_nic *efx = rx_queue->efx;
        struct ef4_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));

        del_timer_sync(&rx_queue->slow_fill);

        /* Release RX buffers from the current read ptr to the write ptr */
        if (rx_queue->buffer) {
                for (i = rx_queue->removed_count; i < rx_queue->added_count;
                     i++) {
                        unsigned index = i & rx_queue->ptr_mask;

                        rx_buf = ef4_rx_buffer(rx_queue, index);
                        ef4_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* Unmap and release the pages in the recycle ring, then free the ring */
        for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                struct page *page = rx_queue->page_ring[i];
                struct ef4_rx_page_state *state;

                if (page == NULL)
                        continue;

                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
        }
        kfree(rx_queue->page_ring);
        rx_queue->page_ring = NULL;
}

void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));

        ef4_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
        struct ef4_nic *efx = netdev_priv(net_dev);
        struct ef4_channel *channel;
        struct ef4_filter_spec spec;
        struct flow_keys fk;
        int rc;

        if (flow_id == RPS_FLOW_ID_INVALID)
                return -EINVAL;

        if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
                return -EPROTONOSUPPORT;

        if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;
        if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
                return -EPROTONOSUPPORT;

        ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
                           efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
                           rxq_index);
        spec.match_flags =
                EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
                EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
                EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
        spec.ether_type = fk.basic.n_proto;
        spec.ip_proto = fk.basic.ip_proto;

        if (fk.basic.n_proto == htons(ETH_P_IP)) {
                spec.rem_host[0] = fk.addrs.v4addrs.src;
                spec.loc_host[0] = fk.addrs.v4addrs.dst;
        } else {
                memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
                memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
        }

        spec.rem_port = fk.ports.src;
        spec.loc_port = fk.ports.dst;

        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
                return rc;

        /* Remember this so we can check whether to expire the filter later */
        channel = ef4_get_channel(efx, rxq_index);
        channel->rps_flow_id[rc] = flow_id;
        ++channel->rfs_filters_added;

        if (spec.ether_type == htons(ETH_P_IP))
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                           spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
                           ntohs(spec.loc_port), rxq_index, flow_id, rc);
        else
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                           spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
                           ntohs(spec.loc_port), rxq_index, flow_id, rc);

        return rc;
}

bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
{
        bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index);
        unsigned int channel_idx, index, size;
        u32 flow_id;

        if (!spin_trylock_bh(&efx->filter_lock))
                return false;

        expire_one = efx->type->filter_rfs_expire_one;
        channel_idx = efx->rps_expire_channel;
        index = efx->rps_expire_index;
        size = efx->type->max_rx_ip_filters;
        while (quota--) {
                struct ef4_channel *channel = ef4_get_channel(efx, channel_idx);

                flow_id = channel->rps_flow_id[index];
                if (flow_id != RPS_FLOW_ID_INVALID &&
                    expire_one(efx, flow_id, index)) {
                        netif_info(efx, rx_status, efx->net_dev,
                                   "expired filter %d [queue %u flow %u]\n",
                                   index, channel_idx, flow_id);
                        channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
                }
                if (++index == size) {
                        if (++channel_idx == efx->n_channels)
                                channel_idx = 0;
                        index = 0;
                }
        }
        efx->rps_expire_channel = channel_idx;
        efx->rps_expire_index = index;

        spin_unlock_bh(&efx->filter_lock);
        return true;
}

#endif /* CONFIG_RFS_ACCEL */

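/**
 * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that matches a
 * local MAC address with the I/G bit set, or a local IPv4 or IPv6 address in
 * the respective multicast range.  Otherwise %false.
 */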
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{
        if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
            spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
                return false;

        if (spec->match_flags &
            (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
            is_multicast_ether_addr(spec->loc_mac))
                return true;

        if ((spec->match_flags &
             (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
            (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
                if (spec->ether_type == htons(ETH_P_IP) &&
                    ipv4_is_multicast(spec->loc_host[0]))
                        return true;
                if (spec->ether_type == htons(ETH_P_IPV6) &&
                    ((const u8 *)spec->loc_host)[0] == 0xff)
                        return true;
        }

        return false;
}