#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of RX buffers to load at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page
 * recycle ring, this number is divided by the number of buffers per page to
 * calculate the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/* Maximum head room to keep free in the RX descriptor ring: one descriptor
 * to avoid filling the ring completely, plus room for one maximally
 * fragmented packet.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

/* Return the virtual address of the start of an RX buffer's data */
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

/* Read the RSS hash out of the hardware RX prefix */
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]       |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

/* Return the buffer following @rx_buf, wrapping at the end of the ring */
static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

/* Make the first @len bytes of a DMA-mapped RX buffer visible to the CPU */
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

/* Work out how many RX buffers fit in each page, and the related sizing
 * fields, based on the current DMA length and buffer order.
 */
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
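
/* Worked example (illustrative only, with assumed numbers, not from the
 * original source): with 4 KiB pages, rx_buffer_order of 0 and
 * rx_dma_len + NET_IP_ALIGN rounding up to 1792 bytes, two buffers fit per
 * page after the struct efx_rx_page_state header, rx_buffer_truesize is
 * 4096 / 2 = 2048, and a preferred batch of 8 buffers needs
 * DIV_ROUND_UP(8, 2) = 4 pages.
 */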

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  Returns a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + NET_IP_ALIGN;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
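
/* Page layout as populated above (added commentary, not from the original
 * source): a struct efx_rx_page_state header at offset 0 records the DMA
 * address of the whole page; it is followed by rx_bufs_per_page buffers
 * spaced rx_page_buf_step bytes apart, each shifted by NET_IP_ALIGN so the
 * IP header lands on an aligned boundary.  The last buffer carved from a
 * page is tagged EFX_RX_BUF_LAST_IN_PAGE so that unmapping and recycling
 * happen exactly once per page.
 */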

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be recycled if it is not shared with the kernel.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffer(rx_buf);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	do {
		efx_free_rx_buffer(rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practise,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
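
/* Worked example of the refill arithmetic (illustrative only, with assumed
 * numbers): with two buffers per page and four pages per batch, batch_size
 * is 8.  If max_fill is 506 and the queue currently holds 400 buffers,
 * space starts at 106, so the loop above pushes 13 batches (104 buffers)
 * before the remaining space drops below one batch.
 */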

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages regardless
 * of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		while (n_frags--) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb->rxhash = efx_rx_buf_hash(efx, eh);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}
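
/* Note (added commentary, not from the original source): napi_gro_frags()
 * consumes the skb obtained from napi_get_frags() together with the page
 * fragments attached above, so no explicit free is needed here.  Any result
 * other than GRO_DROP bumps irq_mod_score, which the driver's adaptive
 * interrupt moderation appears to use as a measure of useful work per
 * interrupt.
 */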

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
	memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(rx_buf);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}
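
/* Sizing example (illustrative only): with an IOMMU present and two buffers
 * per page, the ring holds roundup_pow_of_two(4096 / 2) = 2048 pages;
 * without an IOMMU it holds roundup_pow_of_two(16 / 2) = 8 pages, i.e. just
 * enough to recycle a couple of refill batches.
 */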

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring, then free the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	const struct iphdr *ip;
	const __be16 *ports;
	int nhoff;
	int rc;

	nhoff = skb_network_offset(skb);

	if (skb->protocol == htons(ETH_P_8021Q)) {
		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
				    nhoff + sizeof(struct vlan_hdr));
		if (((const struct vlan_hdr *)(skb->data + nhoff))->
		    h_vlan_encapsulated_proto != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;

		/* This is IP over 802.1q VLAN.  We can't filter on the
		 * IP 5-tuple and the vlan together, so just strip the
		 * vlan header and filter on the IP part.
		 */
		nhoff += sizeof(struct vlan_hdr);
	} else if (skb->protocol != htons(ETH_P_IP)) {
		return -EPROTONOSUPPORT;
	}

	/* The IP header and the first 4 bytes of the transport header
	 * must be in the linear data area (asserted below).
	 */
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
				      ip->daddr, ports[1], ip->saddr, ports[0]);
	if (rc)
		return rc;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	netif_info(efx, rx_status, efx->net_dev,
		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
		   rxq_index, flow_id, rc);

	return rc;
}

bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		flow_id = efx->rps_flow_id[index];
		if (expire_one(efx, flow_id, index))
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [flow %u]\n",
				   index, flow_id);
		if (++index == size)
			index = 0;
	}
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}