// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

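/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp used by this request
 *
 * Returns : none
 */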
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

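/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and is expected to be called by the ULD, i.e., the FCP layer of
 * libfc, to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */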
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_hw *hw;
	u32 fcbuff;

	if (!netdev)
		return 0;

	if (xid >= netdev->fcoe_ddp_xid)
		return 0;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return 0;

	hw = &adapter->hw;
	len = ddp->len;
	/* if no error then skip ddp context invalidation */
	if (!ddp->err)
		goto skip_ddpinv;

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require the DDP FCoE lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
				(xid | IXGBE_FCFLTRW_WE));

		/* program FCBUFF */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);

		/* program FCDMARW */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_WE));

		/* read FCBUFF to check context invalidated */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
	} else {
		/* other hardware requires the DDP FCoE lock */
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* read FCBUFF to check context invalidated */
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
	}

	/* if DDP context is still valid, wait for it to be invalidated */
	if (fcbuff & IXGBE_FCBUFF_VALID)
		usleep_range(100, 150);

skip_ddpinv:
	if (ddp->sgl)
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

	return len;
}

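/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to set up DDP in target mode, 0 for initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */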
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= netdev->fcoe_ddp_xid) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* set up DMA from the scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from the per-cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have a non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, add another buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			ddp_pool->noddp_ext_buff++;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target mode */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;

	/* turn on last frame indication for target mode as the target is
	 * expected to send FCP_RSP when it is done
	 */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require the DDP lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
				ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
	} else {
		/* DDP lock for indirect DDP context access */
		spin_lock_bh(&fcoe->lock);

		IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

		spin_unlock_bh(&fcoe->lock);
	}

	return 1;

out_noddp_free:
	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
	put_cpu();
	return 0;
}

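/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from the ULD, e.g., the FCP layer of
 * libfc, to set up ddp for the corresponding xid of the given sglist
 * for the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */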
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

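/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from the ULD, e.g., the FCP layer of
 * libfc, to set up ddp for the corresponding xid of the given sglist
 * for the corresponding I/O in target mode.
 *
 * Returns : 1 for success and 0 for no ddp
 */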
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}

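/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of data
 * being ddped.
 */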
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
	__le32 ddp_err;
	int ddp_max;
	u32 fctl;
	u16 xid;

	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));

	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has a different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;
	if (xid >= ddp_max)
		return -EINVAL;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return -EINVAL;

	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
					      IXGBE_RXDADV_ERR_FCERR);
	if (ddp_err)
		return -EINVAL;

	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		/* fall through */
	/* if DDP length is present pass it through to ULD */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}

	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame tells us whether we got all
	 * the data so the ULP can send FCP_RSP back.  Since this is not
	 * a full fcoe frame, fill in the trailer here so it is not
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		skb_linearize(skb);
		crc = skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}

	return rc;
}

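/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */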
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* reset the headers to point at fcoe/fc */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* set up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* set up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* set up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);

	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO;
		/* Hardware expects L4T to be RSV for FCoE TSO */
		type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV;
	}

	/* set flag indicating FCOE to ixgbe_tx_map call */
	first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;

	/* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  type_tucmd, mss_l4len_idx);

	return 0;
}

static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	dma_pool_destroy(ddp_pool->pool);
	ddp_pool->pool = NULL;
}

static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct dma_pool *pool;
	char pool_name[32];

	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);

	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
	if (!pool)
		return -ENOMEM;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	ddp_pool->pool = pool;
	ddp_pool->noddp = 0;
	ddp_pool->noddp_ext_buff = 0;

	return 0;
}

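/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */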
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
	struct ixgbe_hw *hw = &adapter->hw;
	int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
	int fcreta_size;
	u32 etqf;

	/* Minimal functionality for FCoE requires at least CRC offloads */
	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
		return;

	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
	etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);

	/* leave remaining registers unconfigured if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return;

	/* Use one or more Rx queues for FCoE by redirection table */
	fcreta_size = IXGBE_FCRETA_SIZE;
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		fcreta_size = IXGBE_FCRETA_SIZE_X550;

	for (i = 0; i < fcreta_size; i++) {
		if (adapter->hw.mac.type == ixgbe_mac_X550) {
			int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
							fcoe->indices);
			fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
			fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
				   IXGBE_FCRETA_ENTRY_HIGH_MASK;
		}

		fcoe_i = fcoe->offset + (i % fcoe->indices);
		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		fcoe_q |= fcoe_q_h;
		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);

	/* Enable L2 EtherType filter for FIP */
	etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

	/* Send FIP frames to the first FCoE queue */
	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	/* Configure FCoE Rx control */
	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
}

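/**
 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter : ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */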
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	int cpu, i, ddp_max;

	/* do nothing if no DDP resources were allocated */
	if (!fcoe->ddp_pool)
		return;

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has a different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;

	for (i = 0; i < ddp_max; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);

	for_each_possible_cpu(cpu)
		ixgbe_fcoe_dma_pool_free(fcoe, cpu);

	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);

	fcoe->extra_ddp_buffer = NULL;
	fcoe->extra_ddp_buffer_dma = 0;
}

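/**
 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Sets up ddp context resources
 *
 * Returns : 0 indicates success or -ENOMEM on failure
 */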
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct device *dev = &adapter->pdev->dev;
	void *buffer;
	dma_addr_t dma;
	unsigned int cpu;

	/* do nothing if no DDP resources were requested */
	if (!fcoe->ddp_pool)
		return 0;

	/* Extra buffer to be shared by all DDPs for HW work around */
	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		e_err(drv, "failed to map extra DDP buffer\n");
		kfree(buffer);
		return -ENOMEM;
	}

	fcoe->extra_ddp_buffer = buffer;
	fcoe->extra_ddp_buffer_dma = dma;

	/* allocate a DMA pool for each cpu */
	for_each_possible_cpu(cpu) {
		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
		if (!err)
			continue;

		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
		ixgbe_free_fcoe_ddp_resources(adapter);
		return -ENOMEM;
	}

	return 0;
}

static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);

	if (!fcoe->ddp_pool) {
		e_err(drv, "failed to allocate percpu DDP resources\n");
		return -ENOMEM;
	}

	adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
	/* X550 has a different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;

	return 0;
}

static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	adapter->netdev->fcoe_ddp_xid = 0;

	if (!fcoe->ddp_pool)
		return;

	free_percpu(fcoe->ddp_pool);
	fcoe->ddp_pool = NULL;
}

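/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in the adapter.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */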
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	atomic_inc(&fcoe->refcnt);

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		return -EINVAL;

	e_info(drv, "Enabling FCoE offload features.\n");

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Allocate per CPU memory to track DDP pools */
	ixgbe_fcoe_ddp_enable(adapter);

	/* enable FCoE and notify stack */
	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}

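/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in the adapter.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */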
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
		return -EINVAL;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	e_info(drv, "Disabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Free per CPU memory used to track DDP pools */
	ixgbe_fcoe_ddp_disable(adapter);

	/* disable FCoE and notify stack */
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	netdev->features &= ~NETIF_F_FCOE_MTU;

	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}

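/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev : ixgbe adapter
 * @wwn : the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name if both the prefix and the
 * SAN MAC address are valid; the wwn is built from the prefix and the
 * SAN MAC address.
 *
 * Returns : 0 on success
 */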
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8) |
		       ((u64) mac->san_addr[5]);
		return 0;
	}
	return -EINVAL;
}

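/**
 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
 * @netdev : ixgbe adapter
 * @info : HBA information
 *
 * Returns ixgbe HBA information
 *
 * Returns : 0 on success
 */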
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, pos;
	u8 buf[8];

	if (!info)
		return -EINVAL;

	/* Don't return information on unsupported devices */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	/* Manufacturer */
	snprintf(info->manufacturer, sizeof(info->manufacturer),
		 "Intel Corporation");

	/* Serial Number */

	/* Get the PCI-e Device Serial Number Capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
	if (pos) {
		pos += 4;
		for (i = 0; i < 8; i++)
			pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);

		snprintf(info->serial_number, sizeof(info->serial_number),
			 "%02X%02X%02X%02X%02X%02X%02X%02X",
			 buf[7], buf[6], buf[5], buf[4],
			 buf[3], buf[2], buf[1], buf[0]);
	} else
		snprintf(info->serial_number, sizeof(info->serial_number),
			 "Unknown");

	/* Hardware Version */
	snprintf(info->hardware_version,
		 sizeof(info->hardware_version),
		 "Rev %d", hw->revision_id);
	/* Driver Name/Version */
	snprintf(info->driver_version,
		 sizeof(info->driver_version),
		 "%s v%s",
		 ixgbe_driver_name,
		 ixgbe_driver_version);
	/* Firmware Version */
	strlcpy(info->firmware_version, adapter->eeprom_id,
		sizeof(info->firmware_version));

	/* Model */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel 82599");
	} else if (hw->mac.type == ixgbe_mac_X550) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X550");
	} else {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X540");
	}

	/* Model Description */
	snprintf(info->model_description,
		 sizeof(info->model_description),
		 "%s",
		 ixgbe_default_device_descr);

	return 0;
}

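/**
 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter: pointer to the device adapter structure
 *
 * Return : TC that FCoE is mapped to
 */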
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
#else
	return 0;
#endif
}