// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
#include "enetc.h"
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/of_mdio.h>
#include <linux/vmalloc.h>

/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
/* max # of chained Tx BDs is 15, including head and extension BD */
#define ENETC_MAX_SKB_FRAGS	13
#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)

static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
                              int active_offloads);

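/* ndo_start_xmit entry point: linearizes oversized skbs, checks BD ring
 * occupancy before mapping, and stops the subqueue once fewer than
 * ENETC_TXBDS_MAX_NEEDED BDs remain, so a worst-case frame (max frags +
 * head + extension BD) still fits on the next transmit attempt.
 */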
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct enetc_bdr *tx_ring;
        int count;

        tx_ring = priv->tx_ring[skb->queue_mapping];

        if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
                if (unlikely(skb_linearize(skb)))
                        goto drop_packet_err;

        count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
        if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
                netif_stop_subqueue(ndev, tx_ring->index);
                return NETDEV_TX_BUSY;
        }

        count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
        if (unlikely(!count))
                goto drop_packet_err;

        if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
                netif_stop_subqueue(ndev, tx_ring->index);

        return NETDEV_TX_OK;

drop_packet_err:
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

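/* Parse the skb checksum request into Tx BD offload fields. Only TCP and
 * UDP partial checksums are offloaded; anything else falls back to
 * skb_checksum_help() in software.
 */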
static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
{
        int l3_start, l3_hsize;
        u16 l3_flags, l4_flags;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return false;

        switch (skb->csum_offset) {
        case offsetof(struct tcphdr, check):
                l4_flags = ENETC_TXBD_L4_TCP;
                break;
        case offsetof(struct udphdr, check):
                l4_flags = ENETC_TXBD_L4_UDP;
                break;
        default:
                skb_checksum_help(skb);
                return false;
        }

        l3_start = skb_network_offset(skb);
        l3_hsize = skb_network_header_len(skb);

        l3_flags = 0;
        if (skb->protocol == htons(ETH_P_IPV6))
                l3_flags = ENETC_TXBD_L3_IPV6;

        /* write BD fields */
        txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
        txbd->l4_csoff = l4_flags;

        return true;
}

static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
                                struct enetc_tx_swbd *tx_swbd)
{
        if (tx_swbd->is_dma_page)
                dma_unmap_page(tx_ring->dev, tx_swbd->dma,
                               tx_swbd->len, DMA_TO_DEVICE);
        else
                dma_unmap_single(tx_ring->dev, tx_swbd->dma,
                                 tx_swbd->len, DMA_TO_DEVICE);
        tx_swbd->dma = 0;
}

static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
                              struct enetc_tx_swbd *tx_swbd)
{
        if (tx_swbd->dma)
                enetc_unmap_tx_buff(tx_ring, tx_swbd);

        if (tx_swbd->skb) {
                dev_kfree_skb_any(tx_swbd->skb);
                tx_swbd->skb = NULL;
        }
}

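/* Map the skb head and fragments to a chain of Tx BDs. The first BD
 * carries frame length and offload flags; an optional extension BD is
 * chained in for VLAN insertion and/or two-step PTP timestamping; the
 * last BD gets the 'F' (final) bit. Returns the number of BDs used, or
 * 0 on DMA mapping failure (after unrolling any partial mapping).
 */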
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
                              int active_offloads)
{
        struct enetc_tx_swbd *tx_swbd;
        skb_frag_t *frag;
        int len = skb_headlen(skb);
        union enetc_tx_bd temp_bd;
        union enetc_tx_bd *txbd;
        bool do_vlan, do_tstamp;
        int i, count = 0;
        unsigned int f;
        dma_addr_t dma;
        u8 flags = 0;

        i = tx_ring->next_to_use;
        txbd = ENETC_TXBD(*tx_ring, i);
        prefetchw(txbd);

        dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
                goto dma_err;

        temp_bd.addr = cpu_to_le64(dma);
        temp_bd.buf_len = cpu_to_le16(len);
        temp_bd.lstatus = 0;

        tx_swbd = &tx_ring->tx_swbd[i];
        tx_swbd->dma = dma;
        tx_swbd->len = len;
        tx_swbd->is_dma_page = 0;
        count++;

        do_vlan = skb_vlan_tag_present(skb);
        do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
                    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
        tx_swbd->do_tstamp = do_tstamp;
        tx_swbd->check_wb = tx_swbd->do_tstamp;

        if (do_vlan || do_tstamp)
                flags |= ENETC_TXBD_FLAGS_EX;

        if (enetc_tx_csum(skb, &temp_bd))
                flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
        else if (tx_ring->tsd_enable)
                flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;

        /* first BD needs frm_len and offload flags set */
        temp_bd.frm_len = cpu_to_le16(skb->len);
        temp_bd.flags = flags;

        if (flags & ENETC_TXBD_FLAGS_TSE) {
                u32 temp;

                temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
                        | (flags << ENETC_TXBD_FLAGS_OFFSET);
                temp_bd.txstart = cpu_to_le32(temp);
        }

        if (flags & ENETC_TXBD_FLAGS_EX) {
                u8 e_flags = 0;
                *txbd = temp_bd;
                enetc_clear_tx_bd(&temp_bd);

                /* add extension BD for VLAN and/or timestamping */
                flags = 0;
                tx_swbd++;
                txbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = tx_ring->tx_swbd;
                        txbd = ENETC_TXBD(*tx_ring, 0);
                }
                prefetchw(txbd);

                if (do_vlan) {
                        temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
                        temp_bd.ext.tpid = 0; /* < C-TAG */
                        e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
                }

                if (do_tstamp) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
                }

                temp_bd.ext.e_flags = e_flags;
                count++;
        }

        frag = &skb_shinfo(skb)->frags[0];
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
                len = skb_frag_size(frag);
                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(tx_ring->dev, dma))
                        goto dma_err;

                *txbd = temp_bd;
                enetc_clear_tx_bd(&temp_bd);

                flags = 0;
                tx_swbd++;
                txbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = tx_ring->tx_swbd;
                        txbd = ENETC_TXBD(*tx_ring, 0);
                }
                prefetchw(txbd);

                temp_bd.addr = cpu_to_le64(dma);
                temp_bd.buf_len = cpu_to_le16(len);

                tx_swbd->dma = dma;
                tx_swbd->len = len;
                tx_swbd->is_dma_page = 1;
                count++;
        }

        /* last BD needs 'F' (final) bit set */
        flags |= ENETC_TXBD_FLAGS_F;
        temp_bd.flags = flags;
        *txbd = temp_bd;

        tx_ring->tx_swbd[i].skb = skb;

        enetc_bdr_idx_inc(tx_ring, &i);
        tx_ring->next_to_use = i;

        skb_tx_timestamp(skb);

        /* let H/W know BD ring has been updated */
        enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */

        return count;

dma_err:
        dev_err(tx_ring->dev, "DMA map error");

        do {
                tx_swbd = &tx_ring->tx_swbd[i];
                enetc_free_tx_skb(tx_ring, tx_swbd);
                if (i == 0)
                        i = tx_ring->bd_count;
                i--;
        } while (count--);

        return 0;
}

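/* MSI-X handler: mask this vector's Rx and Tx ring interrupts and defer
 * the actual work to NAPI; enetc_poll() re-enables them on completion.
 */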
static irqreturn_t enetc_msix(int irq, void *data)
{
        struct enetc_int_vector *v = data;
        int i;

        /* disable interrupts */
        enetc_wr_reg(v->rbier, 0);
        enetc_wr_reg(v->ricr1, v->rx_ictt);

        for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);

        napi_schedule(&v->napi);

        return IRQ_HANDLED;
}

static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                               struct napi_struct *napi, int work_limit);

static void enetc_rx_dim_work(struct work_struct *w)
{
        struct dim *dim = container_of(w, struct dim, work);
        struct dim_cq_moder moder =
                net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
        struct enetc_int_vector *v =
                container_of(dim, struct enetc_int_vector, rx_dim);

        v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
        dim->state = DIM_START_MEASURE;
}

static void enetc_rx_net_dim(struct enetc_int_vector *v)
{
        struct dim_sample dim_sample;

        v->comp_cnt++;

        if (!v->rx_napi_work)
                return;

        dim_update_sample(v->comp_cnt,
                          v->rx_ring.stats.packets,
                          v->rx_ring.stats.bytes,
                          &dim_sample);
        net_dim(&v->rx_dim, dim_sample);
}

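/* NAPI poll: clean all Tx rings mapped to this vector, then the Rx ring.
 * Only when everything finishes under budget do we complete NAPI, feed
 * the dynamic interrupt moderation sample and re-arm the ring interrupts.
 */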
static int enetc_poll(struct napi_struct *napi, int budget)
{
        struct enetc_int_vector
                *v = container_of(napi, struct enetc_int_vector, napi);
        bool complete = true;
        int work_done;
        int i;

        for (i = 0; i < v->count_tx_rings; i++)
                if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
                        complete = false;

        work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
        if (work_done == budget)
                complete = false;
        if (work_done)
                v->rx_napi_work = true;

        if (!complete)
                return budget;

        napi_complete_done(napi, work_done);

        if (likely(v->rx_dim_en))
                enetc_rx_net_dim(v);

        v->rx_napi_work = false;

        /* enable interrupts */
        enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);

        for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
                             ENETC_TBIER_TXTIE);

        return work_done;
}

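/* Number of BDs the device has handed back, i.e. the modular distance
 * from the sw consumer index to the hw producer index. E.g. with
 * bd_count = 256, ci = 250 and pi = 10, 256 - 250 + 10 = 16 BDs are
 * ready to be cleaned.
 */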
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
        int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;

        return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}

static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
                                u64 *tstamp)
{
        u32 lo, hi, tstamp_lo;

        lo = enetc_rd(hw, ENETC_SICTR0);
        hi = enetc_rd(hw, ENETC_SICTR1);
        tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
        /* the BD holds only the low 32 timestamp bits; borrow the high
         * word from the free-running counter, correcting it if the low
         * word has wrapped since the BD was written back
         */
        if (lo <= tstamp_lo)
                hi -= 1;
        *tstamp = (u64)hi << 32 | tstamp_lo;
}

static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
{
        struct skb_shared_hwtstamps shhwtstamps;

        if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
                skb_tstamp_tx(skb, &shhwtstamps);
        }
}

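/* Reclaim completed Tx BDs: unmap buffers, deliver PTP timestamps from
 * write-back descriptors, free skbs on end-of-frame, and wake the
 * subqueue when enough BDs have been freed. Returns true if the ring was
 * fully cleaned within the ENETC_DEFAULT_TX_WORK budget.
 */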
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
        struct net_device *ndev = tx_ring->ndev;
        int tx_frm_cnt = 0, tx_byte_cnt = 0;
        struct enetc_tx_swbd *tx_swbd;
        int i, bds_to_clean;
        bool do_tstamp;
        u64 tstamp = 0;

        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->tx_swbd[i];
        bds_to_clean = enetc_bd_ready_count(tx_ring, i);

        do_tstamp = false;

        while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
                bool is_eof = !!tx_swbd->skb;

                if (unlikely(tx_swbd->check_wb)) {
                        struct enetc_ndev_priv *priv = netdev_priv(ndev);
                        union enetc_tx_bd *txbd;

                        txbd = ENETC_TXBD(*tx_ring, i);

                        if (txbd->flags & ENETC_TXBD_FLAGS_W &&
                            tx_swbd->do_tstamp) {
                                enetc_get_tx_tstamp(&priv->si->hw, txbd,
                                                    &tstamp);
                                do_tstamp = true;
                        }
                }

                if (likely(tx_swbd->dma))
                        enetc_unmap_tx_buff(tx_ring, tx_swbd);

                if (is_eof) {
                        if (unlikely(do_tstamp)) {
                                enetc_tstamp_tx(tx_swbd->skb, tstamp);
                                do_tstamp = false;
                        }
                        napi_consume_skb(tx_swbd->skb, napi_budget);
                        tx_swbd->skb = NULL;
                }

                tx_byte_cnt += tx_swbd->len;

                bds_to_clean--;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = tx_ring->tx_swbd;
                }

                /* BD iteration loop end */
                if (is_eof) {
                        tx_frm_cnt++;
                        /* re-arm interrupt source */
                        enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
                                     BIT(16 + tx_ring->index));
                }

                if (unlikely(!bds_to_clean))
                        bds_to_clean = enetc_bd_ready_count(tx_ring, i);
        }

        tx_ring->next_to_clean = i;
        tx_ring->stats.packets += tx_frm_cnt;
        tx_ring->stats.bytes += tx_byte_cnt;

        if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
                     __netif_subqueue_stopped(ndev, tx_ring->index) &&
                     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
                netif_wake_subqueue(ndev, tx_ring->index);
        }

        return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
}

static bool enetc_new_page(struct enetc_bdr *rx_ring,
                           struct enetc_rx_swbd *rx_swbd)
{
        struct page *page;
        dma_addr_t addr;

        page = dev_alloc_page();
        if (unlikely(!page))
                return false;

        addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
                __free_page(page);

                return false;
        }

        rx_swbd->dma = addr;
        rx_swbd->page = page;
        rx_swbd->page_offset = ENETC_RXB_PAD;

        return true;
}

static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
        struct enetc_rx_swbd *rx_swbd;
        union enetc_rx_bd *rxbd;
        int i, j;

        i = rx_ring->next_to_use;
        rx_swbd = &rx_ring->rx_swbd[i];
        rxbd = enetc_rxbd(rx_ring, i);

        for (j = 0; j < buff_cnt; j++) {
                /* try reuse page */
                if (unlikely(!rx_swbd->page)) {
                        if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
                                rx_ring->stats.rx_alloc_errs++;
                                break;
                        }
                }

                /* update RxBD */
                rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
                                           rx_swbd->page_offset);
                /* clear 'R' (ready) bit as well */
                rxbd->r.lstatus = 0;

                rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
                rx_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        rx_swbd = rx_ring->rx_swbd;
                }
        }

        if (likely(j)) {
                rx_ring->next_to_alloc = i; /* keep track from page reuse */
                rx_ring->next_to_use = i;
                /* update ENETC's consumer index */
                enetc_wr_reg(rx_ring->rcir, i);
        }

        return j;
}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
                                union enetc_rx_bd *rxbd,
                                struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct enetc_hw *hw = &priv->si->hw;
        u32 lo, hi, tstamp_lo;
        u64 tstamp;

        if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
                lo = enetc_rd(hw, ENETC_SICTR0);
                hi = enetc_rd(hw, ENETC_SICTR1);
                rxbd = enetc_rxbd_ext(rxbd);
                tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
                if (lo <= tstamp_lo)
                        hi -= 1;

                tstamp = (u64)hi << 32 | tstamp_lo;
                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
        }
}
#endif

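/* Populate skb fields from the Rx BD: hw-validated checksum, extracted
 * VLAN tag and, if enabled, the Rx timestamp from the extension BD.
 */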
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
                               union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
        struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
#endif
        /* TODO: hashing */
        if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
                u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);

                skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
                skb->ip_summed = CHECKSUM_COMPLETE;
        }

        /* copy VLAN to skb, if one is extracted, for now we assume it's a
         * standard TPID, but HW also supports custom values
         */
        if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       le16_to_cpu(rxbd->r.vlan_opt));
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
        if (priv->active_offloads & ENETC_F_RX_TSTAMP)
                enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
#endif
}

static void enetc_process_skb(struct enetc_bdr *rx_ring,
                              struct sk_buff *skb)
{
        skb_record_rx_queue(skb, rx_ring->index);
        skb->protocol = eth_type_trans(skb, rx_ring->ndev);
}

static bool enetc_page_reusable(struct page *page)
{
        return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}

static void enetc_reuse_page(struct enetc_bdr *rx_ring,
                             struct enetc_rx_swbd *old)
{
        struct enetc_rx_swbd *new;

        new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];

        /* next buf that may reuse a page */
        enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);

        /* copy page reference */
        *new = *old;
}

static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
                                               int i, u16 size)
{
        struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

        dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
                                      rx_swbd->page_offset,
                                      size, DMA_FROM_DEVICE);
        return rx_swbd;
}

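/* Rx buffers are ENETC_RXB_TRUESIZE slices of a page (two per page with
 * the common 4K PAGE_SIZE): recycling flips page_offset between the two
 * halves (XOR with ENETC_RXB_TRUESIZE) and bumps the page refcount, so a
 * slice still owned by the stack keeps the other half usable by the ring.
 * Non-reusable pages are simply unmapped.
 */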
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
                              struct enetc_rx_swbd *rx_swbd)
{
        if (likely(enetc_page_reusable(rx_swbd->page))) {
                rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
                page_ref_inc(rx_swbd->page);

                enetc_reuse_page(rx_ring, rx_swbd);

                /* sync for use by the device */
                dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
                                                 rx_swbd->page_offset,
                                                 ENETC_RXB_DMA_SIZE,
                                                 DMA_FROM_DEVICE);
        } else {
                dma_unmap_page(rx_ring->dev, rx_swbd->dma,
                               PAGE_SIZE, DMA_FROM_DEVICE);
        }

        rx_swbd->page = NULL;
}

static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
                                                int i, u16 size)
{
        struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
        struct sk_buff *skb;
        void *ba;

        ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
        skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
        if (unlikely(!skb)) {
                rx_ring->stats.rx_alloc_errs++;
                return NULL;
        }

        skb_reserve(skb, ENETC_RXB_PAD);
        __skb_put(skb, size);

        enetc_put_rx_buff(rx_ring, rx_swbd);

        return skb;
}

static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
                                     u16 size, struct sk_buff *skb)
{
        struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
                        rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

        enetc_put_rx_buff(rx_ring, rx_swbd);
}

#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */

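/* Rx cleanup path: refill the ring in bundles of ENETC_RXBD_BUNDLE,
 * build an skb from the first BD of each frame, attach any chained
 * fragment BDs, then hand the frame to GRO. Frames flagged with errors
 * are dropped along with the rest of their BD chain.
 */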
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                               struct napi_struct *napi, int work_limit)
{
        int rx_frm_cnt = 0, rx_byte_cnt = 0;
        int cleaned_cnt, i;

        cleaned_cnt = enetc_bd_unused(rx_ring);
        /* next descriptor to process */
        i = rx_ring->next_to_clean;

        while (likely(rx_frm_cnt < work_limit)) {
                union enetc_rx_bd *rxbd;
                struct sk_buff *skb;
                u32 bd_status;
                u16 size;

                if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
                        int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

                        cleaned_cnt -= count;
                }

                rxbd = enetc_rxbd(rx_ring, i);
                bd_status = le32_to_cpu(rxbd->r.lstatus);
                if (!bd_status)
                        break;

                enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
                dma_rmb(); /* for reading other rxbd fields */
                size = le16_to_cpu(rxbd->r.buf_len);
                skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
                if (!skb)
                        break;

                enetc_get_offloads(rx_ring, rxbd, skb);

                cleaned_cnt++;

                rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
                if (unlikely(++i == rx_ring->bd_count))
                        i = 0;

                if (unlikely(bd_status &
                             ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
                        dev_kfree_skb(skb);
                        while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
                                dma_rmb();
                                bd_status = le32_to_cpu(rxbd->r.lstatus);

                                rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
                                if (unlikely(++i == rx_ring->bd_count))
                                        i = 0;
                        }

                        rx_ring->ndev->stats.rx_dropped++;
                        rx_ring->ndev->stats.rx_errors++;

                        break;
                }

                /* not last BD in frame? */
                while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
                        bd_status = le32_to_cpu(rxbd->r.lstatus);
                        size = ENETC_RXB_DMA_SIZE;

                        if (bd_status & ENETC_RXBD_LSTATUS_F) {
                                dma_rmb();
                                size = le16_to_cpu(rxbd->r.buf_len);
                        }

                        enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);

                        cleaned_cnt++;

                        rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
                        if (unlikely(++i == rx_ring->bd_count))
                                i = 0;
                }

                rx_byte_cnt += skb->len;

                enetc_process_skb(rx_ring, skb);

                napi_gro_receive(napi, skb);

                rx_frm_cnt++;
        }

        rx_ring->next_to_clean = i;

        rx_ring->stats.packets += rx_frm_cnt;
        rx_ring->stats.bytes += rx_byte_cnt;

        return rx_frm_cnt;
}

/* Probe/Init */

#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
{
        struct enetc_hw *hw = &si->hw;
        u32 val;

        /* find out how many of various resources we have to work with */
        val = enetc_rd(hw, ENETC_SICAPR0);
        si->num_rx_rings = (val >> 16) & 0xff;
        si->num_tx_rings = val & 0xff;

        val = enetc_rd(hw, ENETC_SIRFSCAPR);
        si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
        si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);

        si->num_rss = 0;
        val = enetc_rd(hw, ENETC_SIPCAPR0);
        if (val & ENETC_SIPCAPR0_RSS) {
                u32 rss;

                rss = enetc_rd(hw, ENETC_SIRSSCAPR);
                si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
        }

        if (val & ENETC_SIPCAPR0_QBV)
                si->hw_features |= ENETC_SI_F_QBV;

        if (val & ENETC_SIPCAPR0_PSFP)
                si->hw_features |= ENETC_SI_F_PSFP;
}

static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
{
        r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
                                        &r->bd_dma_base, GFP_KERNEL);
        if (!r->bd_base)
                return -ENOMEM;

        /* h/w requires 128B alignment */
        if (!IS_ALIGNED(r->bd_dma_base, 128)) {
                dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
                                  r->bd_dma_base);
                return -EINVAL;
        }

        return 0;
}

static int enetc_alloc_txbdr(struct enetc_bdr *txr)
{
        int err;

        txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
        if (!txr->tx_swbd)
                return -ENOMEM;

        err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
        if (err) {
                vfree(txr->tx_swbd);
                return err;
        }

        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}

static void enetc_free_txbdr(struct enetc_bdr *txr)
{
        int size, i;

        for (i = 0; i < txr->bd_count; i++)
                enetc_free_tx_skb(txr, &txr->tx_swbd[i]);

        size = txr->bd_count * sizeof(union enetc_tx_bd);

        dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
        txr->bd_base = NULL;

        vfree(txr->tx_swbd);
        txr->tx_swbd = NULL;
}

static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
        int i, err;

        for (i = 0; i < priv->num_tx_rings; i++) {
                err = enetc_alloc_txbdr(priv->tx_ring[i]);

                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i-- > 0)
                enetc_free_txbdr(priv->tx_ring[i]);

        return err;
}

static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_rings; i++)
                enetc_free_txbdr(priv->tx_ring[i]);
}

static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
{
        size_t size = sizeof(union enetc_rx_bd);
        int err;

        rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
        if (!rxr->rx_swbd)
                return -ENOMEM;

        if (extended)
                size *= 2;

        err = enetc_dma_alloc_bdr(rxr, size);
        if (err) {
                vfree(rxr->rx_swbd);
                return err;
        }

        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;
        rxr->ext_en = extended;

        return 0;
}

static void enetc_free_rxbdr(struct enetc_bdr *rxr)
{
        int size;

        size = rxr->bd_count * sizeof(union enetc_rx_bd);

        dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
        rxr->bd_base = NULL;

        vfree(rxr->rx_swbd);
        rxr->rx_swbd = NULL;
}

static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
{
        bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
        int i, err;

        for (i = 0; i < priv->num_rx_rings; i++) {
                err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);

                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i-- > 0)
                enetc_free_rxbdr(priv->rx_ring[i]);

        return err;
}

static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_rings; i++)
                enetc_free_rxbdr(priv->rx_ring[i]);
}

static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
{
        int i;

        if (!tx_ring->tx_swbd)
                return;

        for (i = 0; i < tx_ring->bd_count; i++) {
                struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];

                enetc_free_tx_skb(tx_ring, tx_swbd);
        }

        tx_ring->next_to_clean = 0;
        tx_ring->next_to_use = 0;
}

static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
        int i;

        if (!rx_ring->rx_swbd)
                return;

        for (i = 0; i < rx_ring->bd_count; i++) {
                struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

                if (!rx_swbd->page)
                        continue;

                dma_unmap_page(rx_ring->dev, rx_swbd->dma,
                               PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(rx_swbd->page);
                rx_swbd->page = NULL;
        }

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        rx_ring->next_to_alloc = 0;
}

static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_rings; i++)
                enetc_free_rx_ring(priv->rx_ring[i]);

        for (i = 0; i < priv->num_tx_rings; i++)
                enetc_free_tx_ring(priv->tx_ring[i]);
}

static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
        int size = cbdr->bd_count * sizeof(struct enetc_cbd);

        cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
                                           GFP_KERNEL);
        if (!cbdr->bd_base)
                return -ENOMEM;

        /* h/w requires 128B alignment */
        if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
                dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
                return -EINVAL;
        }

        cbdr->next_to_clean = 0;
        cbdr->next_to_use = 0;

        return 0;
}

static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
        int size = cbdr->bd_count * sizeof(struct enetc_cbd);

        dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
        cbdr->bd_base = NULL;
}

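/* Program the control BD ring base/length into the SI registers, reset
 * the producer/consumer indexes and enable the ring.
 */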
static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
{
        /* set CBDR cache attributes */
        enetc_wr(hw, ENETC_SICAR2,
                 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

        enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
        enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
        enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

        enetc_wr(hw, ENETC_SICBDRPIR, 0);
        enetc_wr(hw, ENETC_SICBDRCIR, 0);

        /* enable ring */
        enetc_wr(hw, ENETC_SICBDRMR, BIT(31));

        cbdr->pir = hw->reg + ENETC_SICBDRPIR;
        cbdr->cir = hw->reg + ENETC_SICBDRCIR;
}

static void enetc_clear_cbdr(struct enetc_hw *hw)
{
        enetc_wr(hw, ENETC_SICBDRMR, 0);
}

static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
{
        int *rss_table;
        int i;

        rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
        if (!rss_table)
                return -ENOMEM;

        /* Set up RSS table defaults */
        for (i = 0; i < si->num_rss; i++)
                rss_table[i] = i % num_groups;

        enetc_set_rss_table(si, rss_table, si->num_rss);

        kfree(rss_table);

        return 0;
}

static int enetc_configure_si(struct enetc_ndev_priv *priv)
{
        struct enetc_si *si = priv->si;
        struct enetc_hw *hw = &si->hw;
        int err;

        enetc_setup_cbdr(hw, &si->cbd_ring);
        /* set SI cache attributes */
        enetc_wr(hw, ENETC_SICAR0,
                 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
        enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
        /* enable SI */
        enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);

        if (si->num_rss) {
                err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
                if (err)
                        return err;
        }

        return 0;
}

void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
{
        struct enetc_si *si = priv->si;
        int cpus = num_online_cpus();

        priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
        priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;

        /* Enable all available TX rings in order to configure as many
         * priorities as possible, when needed.
         * TODO: Make # of TX rings run-time configurable
         */
        priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
        priv->num_tx_rings = si->num_tx_rings;
        priv->bdr_int_num = cpus;
        priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
        priv->tx_ictt = ENETC_TXIC_TIMETHR;

        /* SI specific */
        si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
}

int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
        struct enetc_si *si = priv->si;
        int err;

        err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
        if (err)
                return err;

        priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
                                  GFP_KERNEL);
        if (!priv->cls_rules) {
                err = -ENOMEM;
                goto err_alloc_cls;
        }

        err = enetc_configure_si(priv);
        if (err)
                goto err_config_si;

        return 0;

err_config_si:
        kfree(priv->cls_rules);
err_alloc_cls:
        enetc_clear_cbdr(&si->hw);
        enetc_free_cbdr(priv->dev, &si->cbd_ring);

        return err;
}

void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
        struct enetc_si *si = priv->si;

        enetc_clear_cbdr(&si->hw);
        enetc_free_cbdr(priv->dev, &si->cbd_ring);

        kfree(priv->cls_rules);
}

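/* Per-ring hw setup: program BD ring base and length (rings must be a
 * multiple of 64 BDs), seed the sw indexes from hw, set interrupt
 * coalescing defaults and finally enable the ring.
 */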
static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        u32 tbmr;

        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits(tx_ring->bd_dma_base));

        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits(tx_ring->bd_dma_base));

        WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
        tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
        tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);

        /* enable Tx ints by setting pkt thr to 1 */
        enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);

        tbmr = ENETC_TBMR_EN;
        if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
                tbmr |= ENETC_TBMR_VIH;

        /* enable ring */
        enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);

        tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
        tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
        tx_ring->idr = hw->reg + ENETC_SITXIDR;
}

static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
        int idx = rx_ring->index;
        u32 rbmr;

        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits(rx_ring->bd_dma_base));

        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits(rx_ring->bd_dma_base));

        WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));

        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);

        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);

        /* enable Rx ints by setting pkt thr to 1 */
        enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);

        rbmr = ENETC_RBMR_EN;

        if (rx_ring->ext_en)
                rbmr |= ENETC_RBMR_BDS;

        if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
                rbmr |= ENETC_RBMR_VTE;

        rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
        rx_ring->idr = hw->reg + ENETC_SIRXIDR;

        enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));

        /* enable ring */
        enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}

static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_rings; i++)
                enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);

        for (i = 0; i < priv->num_rx_rings; i++)
                enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
}

static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
        int idx = rx_ring->index;

        /* disable EN bit on ring */
        enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}

static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int delay = 8, timeout = 100;
        int idx = tx_ring->index;

        /* disable EN bit on ring */
        enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);

        /* wait for busy to clear */
        while (delay < timeout &&
               enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
                msleep(delay);
                delay *= 2;
        }

        if (delay >= timeout)
                netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
                            idx);
}

static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_rings; i++)
                enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);

        for (i = 0; i < priv->num_rx_rings; i++)
                enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);

        udelay(1);
}

static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
        struct pci_dev *pdev = priv->si->pdev;
        cpumask_t cpu_mask;
        int i, j, err;

        for (i = 0; i < priv->bdr_int_num; i++) {
                int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
                struct enetc_int_vector *v = priv->int_vector[i];
                int entry = ENETC_BDR_INT_BASE_IDX + i;
                struct enetc_hw *hw = &priv->si->hw;

                snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
                         priv->ndev->name, i);
                err = request_irq(irq, enetc_msix, 0, v->name, v);
                if (err) {
                        dev_err(priv->dev, "request_irq() failed!\n");
                        goto irq_err;
                }
                disable_irq(irq);

                v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
                v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
                v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);

                enetc_wr(hw, ENETC_SIMSIRRV(i), entry);

                for (j = 0; j < v->count_tx_rings; j++) {
                        int idx = v->tx_ring[j].index;

                        enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
                }
                cpumask_clear(&cpu_mask);
                cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
                irq_set_affinity_hint(irq, &cpu_mask);
        }

        return 0;

irq_err:
        while (i--) {
                int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);

                irq_set_affinity_hint(irq, NULL);
                free_irq(irq, priv->int_vector[i]);
        }

        return err;
}

static void enetc_free_irqs(struct enetc_ndev_priv *priv)
{
        struct pci_dev *pdev = priv->si->pdev;
        int i;

        for (i = 0; i < priv->bdr_int_num; i++) {
                int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);

                irq_set_affinity_hint(irq, NULL);
                free_irq(irq, priv->int_vector[i]);
        }
}

static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
{
        struct enetc_hw *hw = &priv->si->hw;
        u32 icpt, ictt;
        int i;

        /* enable Tx & Rx event indication */
        if (priv->ic_mode &
            (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
                icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
                /* init to non-0 minimum, will be adjusted later */
                ictt = 0x1;
        } else {
                icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
                ictt = 0;
        }

        for (i = 0; i < priv->num_rx_rings; i++) {
                enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
                enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
                enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
        }

        if (priv->ic_mode & ENETC_IC_TX_MANUAL)
                icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
        else
                icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */

        for (i = 0; i < priv->num_tx_rings; i++) {
                enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
                enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
                enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
        }
}

static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_rings; i++)
                enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);

        for (i = 0; i < priv->num_rx_rings; i++)
                enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
}

static void adjust_link(struct net_device *ndev)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;

        if (priv->active_offloads & ENETC_F_QBV)
                enetc_sched_speed_set(ndev);

        phy_print_status(phydev);
}

static int enetc_phy_connect(struct net_device *ndev)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct phy_device *phydev;
        struct ethtool_eee edata;

        if (!priv->phy_node)
                return 0; /* phy-less mode */

        phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
                                0, priv->if_mode);
        if (!phydev) {
                dev_err(&ndev->dev, "could not attach to PHY\n");
                return -ENODEV;
        }

        phy_attached_info(phydev);

        /* disable EEE autoneg, until ENETC driver supports it */
        memset(&edata, 0, sizeof(struct ethtool_eee));
        phy_ethtool_set_eee(phydev, &edata);

        return 0;
}

void enetc_start(struct net_device *ndev)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        int i;

        enetc_setup_interrupts(priv);

        for (i = 0; i < priv->bdr_int_num; i++) {
                int irq = pci_irq_vector(priv->si->pdev,
                                         ENETC_BDR_INT_BASE_IDX + i);

                napi_enable(&priv->int_vector[i]->napi);
                enable_irq(irq);
        }

        if (ndev->phydev)
                phy_start(ndev->phydev);
        else
                netif_carrier_on(ndev);

        netif_tx_start_all_queues(ndev);
}

int enetc_open(struct net_device *ndev)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        int err;

        err = enetc_setup_irqs(priv);
        if (err)
                return err;

        err = enetc_phy_connect(ndev);
        if (err)
                goto err_phy_connect;

        err = enetc_alloc_tx_resources(priv);
        if (err)
                goto err_alloc_tx;

        err = enetc_alloc_rx_resources(priv);
        if (err)
                goto err_alloc_rx;

        err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
        if (err)
                goto err_set_queues;

        err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
        if (err)
                goto err_set_queues;

        enetc_setup_bdrs(priv);
        enetc_start(ndev);

        return 0;

err_set_queues:
        enetc_free_rx_resources(priv);
err_alloc_rx:
        enetc_free_tx_resources(priv);
err_alloc_tx:
        if (ndev->phydev)
                phy_disconnect(ndev->phydev);
err_phy_connect:
        enetc_free_irqs(priv);

        return err;
}

void enetc_stop(struct net_device *ndev)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        int i;

        netif_tx_stop_all_queues(ndev);

        for (i = 0; i < priv->bdr_int_num; i++) {
                int irq = pci_irq_vector(priv->si->pdev,
                                         ENETC_BDR_INT_BASE_IDX + i);

                disable_irq(irq);
                napi_synchronize(&priv->int_vector[i]->napi);
                napi_disable(&priv->int_vector[i]->napi);
        }

        if (ndev->phydev)
                phy_stop(ndev->phydev);
        else
                netif_carrier_off(ndev);

        enetc_clear_interrupts(priv);
}

int enetc_close(struct net_device *ndev)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);

        enetc_stop(ndev);
        enetc_clear_bdrs(priv);

        if (ndev->phydev)
                phy_disconnect(ndev->phydev);
        enetc_free_rxtx_rings(priv);
        enetc_free_rx_resources(priv);
        enetc_free_tx_resources(priv);
        enetc_free_irqs(priv);

        return 0;
}

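/* mqprio offload: map each traffic class to one BD ring with increasing
 * hw priority, or restore the default (all rings, priority 0) when the
 * TC count is zero.
 */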
static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct tc_mqprio_qopt *mqprio = type_data;
        struct enetc_bdr *tx_ring;
        u8 num_tc;
        int i;

        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
        num_tc = mqprio->num_tc;

        if (!num_tc) {
                netdev_reset_tc(ndev);
                netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);

                /* Reset all ring priorities to 0 */
                for (i = 0; i < priv->num_tx_rings; i++) {
                        tx_ring = priv->tx_ring[i];
                        enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
                }

                return 0;
        }

        /* Check if we have enough BD rings available to accommodate all TCs */
        if (num_tc > priv->num_tx_rings) {
                netdev_err(ndev, "Max %d traffic classes supported\n",
                           priv->num_tx_rings);
                return -EINVAL;
        }

        /* For the moment, we use only one BD ring per TC.
         *
         * Configure num_tc BD rings with increasing priorities.
         */
        for (i = 0; i < num_tc; i++) {
                tx_ring = priv->tx_ring[i];
                enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
        }

        /* Reset the number of netdev queues based on the TC count */
        netif_set_real_num_tx_queues(ndev, num_tc);

        netdev_set_num_tc(ndev, num_tc);

        /* Each TC is associated with one netdev queue */
        for (i = 0; i < num_tc; i++)
                netdev_set_tc_queue(ndev, i, 1, i);

        return 0;
}

int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
                   void *type_data)
{
        switch (type) {
        case TC_SETUP_QDISC_MQPRIO:
                return enetc_setup_tc_mqprio(ndev, type_data);
        case TC_SETUP_QDISC_TAPRIO:
                return enetc_setup_tc_taprio(ndev, type_data);
        case TC_SETUP_QDISC_CBS:
                return enetc_setup_tc_cbs(ndev, type_data);
        case TC_SETUP_QDISC_ETF:
                return enetc_setup_tc_txtime(ndev, type_data);
        case TC_SETUP_BLOCK:
                return enetc_setup_tc_psfp(ndev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &ndev->stats;
        unsigned long packets = 0, bytes = 0;
        int i;

        for (i = 0; i < priv->num_rx_rings; i++) {
                packets += priv->rx_ring[i]->stats.packets;
                bytes += priv->rx_ring[i]->stats.bytes;
        }

        stats->rx_packets = packets;
        stats->rx_bytes = bytes;
        bytes = 0;
        packets = 0;

        for (i = 0; i < priv->num_tx_rings; i++) {
                packets += priv->tx_ring[i]->stats.packets;
                bytes += priv->tx_ring[i]->stats.bytes;
        }

        stats->tx_packets = packets;
        stats->tx_bytes = bytes;

        return stats;
}

static int enetc_set_rss(struct net_device *ndev, int en)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct enetc_hw *hw = &priv->si->hw;
        u32 reg;

        enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);

        reg = enetc_rd(hw, ENETC_SIMR);
        reg &= ~ENETC_SIMR_RSSE;
        reg |= (en) ? ENETC_SIMR_RSSE : 0;
        enetc_wr(hw, ENETC_SIMR, reg);

        return 0;
}

static int enetc_set_psfp(struct net_device *ndev, int en)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        int err;

        if (en) {
                err = enetc_psfp_enable(priv);
                if (err)
                        return err;

                priv->active_offloads |= ENETC_F_QCI;
                return 0;
        }

        err = enetc_psfp_disable(priv);
        if (err)
                return err;

        priv->active_offloads &= ~ENETC_F_QCI;

        return 0;
}

static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        int i;

        for (i = 0; i < priv->num_rx_rings; i++)
                enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
}

static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        int i;

        for (i = 0; i < priv->num_tx_rings; i++)
                enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
}

int enetc_set_features(struct net_device *ndev,
                       netdev_features_t features)
{
        netdev_features_t changed = ndev->features ^ features;
        int err = 0;

        if (changed & NETIF_F_RXHASH)
                enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));

        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                enetc_enable_rxvlan(ndev,
                                    !!(features & NETIF_F_HW_VLAN_CTAG_RX));

        if (changed & NETIF_F_HW_VLAN_CTAG_TX)
                enetc_enable_txvlan(ndev,
                                    !!(features & NETIF_F_HW_VLAN_CTAG_TX));

        if (changed & NETIF_F_HW_TC)
                err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));

        return err;
}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct hwtstamp_config config;
        int ao;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
                break;
        case HWTSTAMP_TX_ON:
                priv->active_offloads |= ENETC_F_TX_TSTAMP;
                break;
        default:
                return -ERANGE;
        }

        ao = priv->active_offloads;
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
                break;
        default:
                priv->active_offloads |= ENETC_F_RX_TSTAMP;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
        }

        if (netif_running(ndev) && ao != priv->active_offloads) {
                enetc_close(ndev);
                enetc_open(ndev);
        }

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
               -EFAULT : 0;
}

static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
{
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct hwtstamp_config config;

        config.flags = 0;

        if (priv->active_offloads & ENETC_F_TX_TSTAMP)
                config.tx_type = HWTSTAMP_TX_ON;
        else
                config.tx_type = HWTSTAMP_TX_OFF;

        config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
                            HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
               -EFAULT : 0;
}
#endif

int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
        if (cmd == SIOCSHWTSTAMP)
                return enetc_hwtstamp_set(ndev, rq);
        if (cmd == SIOCGHWTSTAMP)
                return enetc_hwtstamp_get(ndev, rq);
#endif

        if (!ndev->phydev)
                return -EOPNOTSUPP;
        return phy_mii_ioctl(ndev->phydev, rq, cmd);
}

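/* Allocate one MSI-X vector per interrupt vector structure: each vector
 * owns one Rx ring and an even share of the Tx rings, and gets its own
 * NAPI context with adaptive Rx coalescing defaults.
 */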
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
        struct pci_dev *pdev = priv->si->pdev;
        int v_tx_rings;
        int i, n, err, nvec;

        nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
        /* allocate MSIX for both messaging and Rx/Tx interrupts */
        n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);

        if (n < 0)
                return n;

        if (n != nvec)
                return -EPERM;

        /* # of tx rings per int vector */
        v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;

        for (i = 0; i < priv->bdr_int_num; i++) {
                struct enetc_int_vector *v;
                struct enetc_bdr *bdr;
                int j;

                v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
                if (!v) {
                        err = -ENOMEM;
                        goto fail;
                }

                priv->int_vector[i] = v;

                /* init defaults for adaptive IC */
                if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
                        v->rx_ictt = 0x1;
                        v->rx_dim_en = true;
                }
                INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
                netif_napi_add(priv->ndev, &v->napi, enetc_poll,
                               NAPI_POLL_WEIGHT);
                v->count_tx_rings = v_tx_rings;

                for (j = 0; j < v_tx_rings; j++) {
                        int idx;

                        /* default tx ring mapping policy */
                        if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
                                idx = 2 * j + i; /* 2 CPUs */
                        else
                                idx = j + i * v_tx_rings; /* default */

                        __set_bit(idx, &v->tx_rings_map);
                        bdr = &v->tx_ring[j];
                        bdr->index = idx;
                        bdr->ndev = priv->ndev;
                        bdr->dev = priv->dev;
                        bdr->bd_count = priv->tx_bd_count;
                        priv->tx_ring[idx] = bdr;
                }

                bdr = &v->rx_ring;
                bdr->index = i;
                bdr->ndev = priv->ndev;
                bdr->dev = priv->dev;
                bdr->bd_count = priv->rx_bd_count;
                priv->rx_ring[i] = bdr;
        }

        return 0;

fail:
        while (i--) {
                netif_napi_del(&priv->int_vector[i]->napi);
                cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
                kfree(priv->int_vector[i]);
        }

        pci_free_irq_vectors(pdev);

        return err;
}

void enetc_free_msix(struct enetc_ndev_priv *priv)
{
        int i;

        for (i = 0; i < priv->bdr_int_num; i++) {
                struct enetc_int_vector *v = priv->int_vector[i];

                netif_napi_del(&v->napi);
                cancel_work_sync(&v->rx_dim.work);
        }

        for (i = 0; i < priv->num_rx_rings; i++)
                priv->rx_ring[i] = NULL;

        for (i = 0; i < priv->num_tx_rings; i++)
                priv->tx_ring[i] = NULL;

        for (i = 0; i < priv->bdr_int_num; i++) {
                kfree(priv->int_vector[i]);
                priv->int_vector[i] = NULL;
        }

        /* disable all MSIX for this device */
        pci_free_irq_vectors(priv->si->pdev);
}

static void enetc_kfree_si(struct enetc_si *si)
{
        char *p = (char *)si - si->pad;

        kfree(p);
}

static void enetc_detect_errata(struct enetc_si *si)
{
        if (si->pdev->revision == ENETC_REV1)
                si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
                             ENETC_ERR_UCMCSWP;
}

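/* Common PCI probe for PF and VF drivers: reset and enable the function,
 * set the DMA mask, map BAR 0 and carve out an ENETC_SI_ALIGN-aligned
 * struct enetc_si (plus optional private area) from one allocation.
 */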
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
{
        struct enetc_si *si, *p;
        struct enetc_hw *hw;
        size_t alloc_size;
        int err, len;

        pcie_flr(pdev);
        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev, "device enable failed\n");
                return err;
        }

        /* set up for high or low dma */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "DMA configuration failed: 0x%x\n", err);
                        goto err_dma;
                }
        }

        err = pci_request_mem_regions(pdev, name);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
                goto err_pci_mem_reg;
        }

        pci_set_master(pdev);

        alloc_size = sizeof(struct enetc_si);
        if (sizeof_priv) {
                /* align priv to 32B */
                alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
                alloc_size += sizeof_priv;
        }
        /* force 32B alignment for enetc_si */
        alloc_size += ENETC_SI_ALIGN - 1;

        p = kzalloc(alloc_size, GFP_KERNEL);
        if (!p) {
                err = -ENOMEM;
                goto err_alloc_si;
        }

        si = PTR_ALIGN(p, ENETC_SI_ALIGN);
        si->pad = (char *)si - (char *)p;

        pci_set_drvdata(pdev, si);
        si->pdev = pdev;
        hw = &si->hw;

        len = pci_resource_len(pdev, ENETC_BAR_REGS);
        hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
        if (!hw->reg) {
                err = -ENXIO;
                dev_err(&pdev->dev, "ioremap() failed\n");
                goto err_ioremap;
        }
        if (len > ENETC_PORT_BASE)
                hw->port = hw->reg + ENETC_PORT_BASE;
        if (len > ENETC_GLOBAL_BASE)
                hw->global = hw->reg + ENETC_GLOBAL_BASE;

        enetc_detect_errata(si);

        return 0;

err_ioremap:
        enetc_kfree_si(si);
err_alloc_si:
        pci_release_mem_regions(pdev);
err_pci_mem_reg:
err_dma:
        pci_disable_device(pdev);

        return err;
}

void enetc_pci_remove(struct pci_dev *pdev)
{
        struct enetc_si *si = pci_get_drvdata(pdev);
        struct enetc_hw *hw = &si->hw;

        iounmap(hw->reg);
        enetc_kfree_si(si);
        pci_release_mem_regions(pdev);
        pci_disable_device(pdev);
}