1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48#include <asm/unaligned.h>
49#include <linux/kernel.h>
50#include <linux/module.h>
51#include <linux/kref.h>
52#include <linux/io.h>
53#include <linux/delay.h>
54#include <linux/interrupt.h>
55#include <linux/sort.h>
56#include <linux/sched.h>
57#include <linux/types.h>
58#include <linux/pci.h>
59
60#include "nfp_cpp.h"
61
62#include "nfp6000/nfp6000.h"
63
64#include "nfp6000_pcie.h"
65
/* Per-PF PCIe-to-CPP BAR configuration CSR block */
#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0)

/* Explicit transaction CSRs: three 32-bit words per (group _x, area _y) */
#define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \
	(0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x) (((_x) & 0x3) << 30)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x) (((_x) >> 30) & 0x3)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x) (((_x) & 0x3) << 28)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x) (((_x) >> 28) & 0x3)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x) (((_x) & 0xffffff) << 0)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x) (((_x) >> 0) & 0xffffff)
#define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \
	(0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x) (((_x) & 0x7f) << 24)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x) (((_x) >> 24) & 0x7f)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x) (((_x) & 0x3ff) << 14)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x) (((_x) >> 14) & 0x3ff)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x) (((_x) & 0x3fff) << 0)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x) (((_x) >> 0) & 0x3fff)
#define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \
	(0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x) (((_x) & 0xf) << 28)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x) (((_x) >> 28) & 0xf)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x) (((_x) & 0x1f) << 23)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x) (((_x) >> 23) & 0x1f)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x) (((_x) & 0x1f) << 18)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x) (((_x) >> 18) & 0x1f)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x) (((_x) & 0xff) << 10)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x) (((_x) >> 10) & 0xff)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x) (((_x) & 0x3ff) << 0)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff)

/* Fields of a PCIe-to-CPP expansion BAR configuration word */
#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x) (((_x) & 0x1f) << 16)
#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f)
#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x) (((_x) & 0xffff) << 0)
#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x) (((_x) >> 0) & 0xffff)
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x) (((_x) & 0x3) << 27)
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x) (((_x) >> 27) & 0x3)
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT 0
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT 1
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE 3
#define NFP_PCIE_BAR_PCIE2CPP_MapType(_x) (((_x) & 0x7) << 29)
#define NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x) (((_x) >> 29) & 0x7)
#define NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED 0
#define NFP_PCIE_BAR_PCIE2CPP_MapType_BULK 1
#define NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET 2
#define NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL 3
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0 4
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1 5
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2 6
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3 7
#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x) (((_x) & 0xf) << 23)
#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf)
#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x) (((_x) & 0x3) << 21)
#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3)
/* Offsets within the BAR0.0 general mapping */
#define NFP_PCIE_EM 0x020000
#define NFP_PCIE_SRAM 0x000000

/* Window sizes / sub-window offsets for the various map types */
#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))

/* Expansion BAR config register: via PCI config space ... */
#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
	(0x400 + ((bar) * 8 + (slot)) * 4)

/* ... or via the memory-mapped CSR window */
#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
	(((bar) * 8 + (slot)) * 4)

/* Number of BARs reserved for explicit transactions */
#define NFP_PCIE_EXPLICIT_BARS 2

struct nfp6000_pcie;
struct nfp6000_area_priv;
141
142
143
144
145
146
147
148
149
150
151
152
153
/* struct nfp_bar - describes one sub-BAR (one eighth of a PCI BAR)
 * @nfp:	backpointer to the owning device state
 * @barcfg:	currently programmed expansion BAR config word
 * @base:	CPP base address currently covered by this BAR
 * @mask:	mask of the in-window offset bits (resource length - 1)
 * @bitsize:	log2 of the window size
 * @index:	global sub-BAR index (PCI BAR * 8 + slot)
 * @refcnt:	number of users currently holding this BAR
 * @iomem:	kernel mapping of the window, or NULL if unmapped
 * @resource:	parent PCI resource
 */
struct nfp_bar {
	struct nfp6000_pcie *nfp;
	u32 barcfg;
	u64 base;
	u64 mask;
	u32 bitsize;
	int index;
	atomic_t refcnt;

	void __iomem *iomem;
	struct resource *resource;
};
166
/* Eight sub-BARs per 64-bit PCI BAR */
#define NFP_PCI_BAR_MAX (PCI_64BIT_BAR_COUNT * 8)

/* struct nfp6000_pcie - per-device transport state
 * @pdev:	underlying PCI device
 * @dev:	generic device pointer (same device as @pdev)
 * @bar_lock:	protects BAR assignment (@bars, @bar[], refcounts)
 * @bars:	number of usable sub-BARs discovered
 * @bar:	sub-BAR bookkeeping, sorted by size after enable_bars()
 * @bar_waiters: waitqueue for threads blocked waiting for a free BAR
 * @iomem:	convenience pointers into the reserved mappings
 * @expl:	explicit transaction state; @mutex guards group/area
 *		allocation, @free[] marks available areas per group
 */
struct nfp6000_pcie {
	struct pci_dev *pdev;
	struct device *dev;


	spinlock_t bar_lock;
	int bars;
	struct nfp_bar bar[NFP_PCI_BAR_MAX];
	wait_queue_head_t bar_waiters;


	struct {
		void __iomem *csr;
		void __iomem *em;
		void __iomem *expl[4];
	} iomem;


	struct {
		struct mutex mutex;
		u8 master_id;
		u8 signal_ref;
		void __iomem *data;
		struct {
			void __iomem *addr;
			int bitsize;
			int free[4];
		} group[4];
	} expl;
};
199
200static u32 nfp_bar_maptype(struct nfp_bar *bar)
201{
202 return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
203}
204
205static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar)
206{
207 return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8;
208}
209
210static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
211{
212 return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2)
213 + nfp_bar_resource_len(bar) * (bar->index & 7);
214}
215
216#define TARGET_WIDTH_32 4
217#define TARGET_WIDTH_64 8
218
/* Compute the expansion BAR configuration word (and the CPP base the
 * window will start at) needed for @bar to cover @size bytes at CPP
 * address @offset for target @tgt, action @act, token @tok with access
 * width @width (0, 4 or 8 bytes).
 *
 * @bar_config and @bar_base may be NULL if the caller only wants a
 * feasibility check.  Returns 0 on success, -EINVAL if the request
 * cannot be expressed (bad target/width, range straddles a window
 * boundary, or the BAR is too small for the chosen map type).
 */
static int
compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
	    u32 *bar_config, u64 *bar_base,
	    int tgt, int act, int tok, u64 offset, size_t size, int width)
{
	int bitsize;
	u32 newcfg;

	if (tgt >= NFP_CPP_NUM_TARGETS)
		return -EINVAL;

	switch (width) {
	case 8:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT);
		break;
	case 4:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
		break;
	case 0:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE);
		break;
	default:
		return -EINVAL;
	}

	if (act != NFP_CPP_ACTION_RW && act != 0) {
		/* Fixed map: target, action and token are all encoded in
		 * the BAR, leaving 40 - 16 CPP address bits in the window.
		 */
		u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);

		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		/* The whole range must fit inside a single window */
		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;
		offset &= mask;

		bitsize = 40 - 16;
	} else {
		/* Bulk map: only target and token are encoded; the action
		 * is the implicit read/write one, leaving 40 - 21 bits.
		 */
		u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);


		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		/* The whole range must fit inside a single window */
		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;

		offset &= mask;

		bitsize = 40 - 21;
	}

	/* The BAR must be at least as large as the window type demands */
	if (bar->bitsize < bitsize)
		return -EINVAL;

	/* High CPP address bits become the BaseAddress field */
	newcfg |= offset >> bitsize;

	if (bar_base)
		*bar_base = offset;

	if (bar_config)
		*bar_config = newcfg;

	return 0;
}
292
293static int
294nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
295{
296 int base, slot;
297 int xbar;
298
299 base = bar->index >> 3;
300 slot = bar->index & 7;
301
302 if (nfp->iomem.csr) {
303 xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
304 writel(newcfg, nfp->iomem.csr + xbar);
305
306 readl(nfp->iomem.csr + xbar);
307 } else {
308 xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
309 pci_write_config_dword(nfp->pdev, xbar, newcfg);
310 }
311
312 bar->barcfg = newcfg;
313
314 return 0;
315}
316
317static int
318reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
319 int tgt, int act, int tok, u64 offset, size_t size, int width)
320{
321 u64 newbase;
322 u32 newcfg;
323 int err;
324
325 err = compute_bar(nfp, bar, &newcfg, &newbase,
326 tgt, act, tok, offset, size, width);
327 if (err)
328 return err;
329
330 bar->base = newbase;
331
332 return nfp6000_bar_write(nfp, bar, newcfg);
333}
334
335
/* Check if BAR can be used with the given parameters.
 *
 * Returns 1 if the BAR's current configuration already covers the
 * requested target/action/token, width and address range, 0 otherwise.
 */
static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
			u64 offset, size_t size, int width)
{
	int bartgt, baract, bartok;
	int barwidth;
	u32 maptype;

	maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
	bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg);
	bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg);
	baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg);

	barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg);
	switch (barwidth) {
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT:
		barwidth = 4;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT:
		barwidth = 8;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE:
		barwidth = 0;
		break;
	default:
		barwidth = -1;
		break;
	}

	switch (maptype) {
	case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
		bartok = -1;
		/* FALLTHROUGH: a target map also matches any token */
	case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
		baract = NFP_CPP_ACTION_RW;
		if (act == 0)
			act = NFP_CPP_ACTION_RW;
		/* FALLTHROUGH: bulk maps use the implicit RW action */
	case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
		break;
	default:
		/* We don't match explicit bars through the area interface */
		return 0;
	}

	/* Make sure to match up the width */
	if (barwidth != width)
		return 0;

	if ((bartgt < 0 || bartgt == tgt) &&
	    (bartok < 0 || bartok == tok) &&
	    (baract == act) &&
	    bar->base <= offset &&
	    (bar->base + (1 << bar->bitsize)) >= (offset + size))
		return 1;

	/* No match */
	return 0;
}
394
395static int
396find_matching_bar(struct nfp6000_pcie *nfp,
397 u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
398{
399 int n;
400
401 for (n = 0; n < nfp->bars; n++) {
402 struct nfp_bar *bar = &nfp->bar[n];
403
404 if (matching_bar(bar, tgt, act, tok, offset, size, width))
405 return n;
406 }
407
408 return -1;
409}
410
411
/* Return the index of an unreferenced BAR that could be reprogrammed to
 * cover the requested window, without blocking.
 *
 * Returns -EINVAL if no BAR could ever satisfy the request (all
 * candidates are invalid for it), -EAGAIN if a suitable BAR exists but
 * all are currently in use.  Caller must hold bar_lock.
 */
static int
find_unused_bar_noblock(struct nfp6000_pcie *nfp,
			int tgt, int act, int tok,
			u64 offset, size_t size, int width)
{
	int n, invalid = 0;

	for (n = 0; n < nfp->bars; n++) {
		struct nfp_bar *bar = &nfp->bar[n];
		int err;

		/* bitsize == 0 marks a BAR that was never set up */
		if (bar->bitsize == 0) {
			invalid++;
			continue;
		}

		if (atomic_read(&bar->refcnt) != 0)
			continue;

		/* Just check to see if we can make it fit... */
		err = compute_bar(nfp, bar, NULL, NULL,
				  tgt, act, tok, offset, size, width);

		if (err < 0)
			invalid++;
		else
			return n;
	}

	return (n == invalid) ? -EINVAL : -EAGAIN;
}
443
/* Find an unused BAR, returning with bar_lock held on success.
 *
 * On failure the lock is dropped before returning; on success the
 * caller inherits the lock (the __release() only balances the sparse
 * annotation, the lock stays held).
 */
static int
find_unused_bar_and_lock(struct nfp6000_pcie *nfp,
			 int tgt, int act, int tok,
			 u64 offset, size_t size, int width)
{
	unsigned long flags;
	int n;

	spin_lock_irqsave(&nfp->bar_lock, flags);

	n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width);
	if (n < 0)
		spin_unlock_irqrestore(&nfp->bar_lock, flags);
	else
		__release(&nfp->bar_lock);

	return n;
}
462
/* Take a reference on @bar. */
static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	atomic_inc(&bar->refcnt);
}
467
/* Drop a reference on @bar; wake anyone waiting for a free BAR when the
 * last reference goes away.
 */
static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	if (atomic_dec_and_test(&bar->refcnt))
		wake_up_interruptible(&nfp->bar_waiters);
}
473
/* Sleep until a BAR that fits the request becomes free; on success
 * *barnum is set and bar_lock is held (see find_unused_bar_and_lock()).
 * Returns -ERESTARTSYS if interrupted by a signal.
 */
static int
nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum,
		 u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
{
	return wait_event_interruptible(nfp->bar_waiters,
		(*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok,
						    offset, size, width))
		!= -EAGAIN);
}
483
/* Allocate a BAR covering @size bytes at CPP @offset for the given
 * target/action/token and access @width.
 *
 * First tries to share an already-matching BAR, then to reprogram an
 * unused one; if none is free and @nonblocking is false, sleeps until
 * one is released.  Returns the BAR index with a reference held, or a
 * negative errno.
 */
static int
nfp_alloc_bar(struct nfp6000_pcie *nfp,
	      u32 tgt, u32 act, u32 tok,
	      u64 offset, size_t size, int width, int nonblocking)
{
	unsigned long irqflags;
	int barnum, retval;

	/* Hardware windows are at most 16MB */
	if (size > (1 << 24))
		return -EINVAL;

	spin_lock_irqsave(&nfp->bar_lock, irqflags);
	barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width);
	if (barnum >= 0) {
		/* Found a perfect match - share it */
		nfp_bar_get(nfp, &nfp->bar[barnum]);
		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
		return barnum;
	}

	barnum = find_unused_bar_noblock(nfp, tgt, act, tok,
					 offset, size, width);
	if (barnum < 0) {
		if (nonblocking)
			goto err_nobar;

		/* Wait until a BAR becomes free.  The worst case cost
		 * here is that we drop the lock and sleep; on success
		 * nfp_wait_for_bar() returns with the lock re-held (the
		 * __acquire() balances the sparse annotation).
		 */
		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
		retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok,
					  offset, size, width);
		if (retval)
			return retval;
		__acquire(&nfp->bar_lock);
	}

	nfp_bar_get(nfp, &nfp->bar[barnum]);
	retval = reconfigure_bar(nfp, &nfp->bar[barnum],
				 tgt, act, tok, offset, size, width);
	if (retval < 0) {
		nfp_bar_put(nfp, &nfp->bar[barnum]);
		barnum = retval;
	}

err_nobar:
	spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
	return barnum;
}
534
535static void disable_bars(struct nfp6000_pcie *nfp);
536
537static int bar_cmp(const void *aptr, const void *bptr)
538{
539 const struct nfp_bar *a = aptr, *b = bptr;
540
541 if (a->bitsize == b->bitsize)
542 return a->index - b->index;
543 else
544 return a->bitsize - b->bitsize;
545}
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
564{
565 const u32 barcfg_msix_general =
566 NFP_PCIE_BAR_PCIE2CPP_MapType(
567 NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
568 NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
569 const u32 barcfg_msix_xpb =
570 NFP_PCIE_BAR_PCIE2CPP_MapType(
571 NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
572 NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
573 NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
574 NFP_CPP_TARGET_ISLAND_XPB);
575 const u32 barcfg_explicit[4] = {
576 NFP_PCIE_BAR_PCIE2CPP_MapType(
577 NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0),
578 NFP_PCIE_BAR_PCIE2CPP_MapType(
579 NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1),
580 NFP_PCIE_BAR_PCIE2CPP_MapType(
581 NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2),
582 NFP_PCIE_BAR_PCIE2CPP_MapType(
583 NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
584 };
585 struct nfp_bar *bar;
586 int i, bars_free;
587 int expl_groups;
588
589 bar = &nfp->bar[0];
590 for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
591 struct resource *res;
592
593 res = &nfp->pdev->resource[(i >> 3) * 2];
594
595
596 if (!(resource_type(res) & IORESOURCE_MEM)) {
597 bar--;
598 continue;
599 }
600
601 bar->resource = res;
602 bar->barcfg = 0;
603
604 bar->nfp = nfp;
605 bar->index = i;
606 bar->mask = nfp_bar_resource_len(bar) - 1;
607 bar->bitsize = fls(bar->mask);
608 bar->base = 0;
609 bar->iomem = NULL;
610 }
611
612 nfp->bars = bar - &nfp->bar[0];
613 if (nfp->bars < 8) {
614 dev_err(nfp->dev, "No usable BARs found!\n");
615 return -EINVAL;
616 }
617
618 bars_free = nfp->bars;
619
620
621
622 mutex_init(&nfp->expl.mutex);
623
624 nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4)
625 << 4;
626 nfp->expl.signal_ref = 0x10;
627
628
629 bar = &nfp->bar[0];
630 bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
631 nfp_bar_resource_len(bar));
632 if (bar->iomem) {
633 dev_info(nfp->dev,
634 "BAR0.0 RESERVED: General Mapping/MSI-X SRAM\n");
635 atomic_inc(&bar->refcnt);
636 bars_free--;
637
638 nfp6000_bar_write(nfp, bar, barcfg_msix_general);
639
640 nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
641 }
642
643 if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
644 nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) {
645 nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
646 expl_groups = 4;
647 } else {
648 int pf = nfp->pdev->devfn & 7;
649
650 nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
651 expl_groups = 1;
652 }
653 nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
654
655
656 bar = &nfp->bar[1];
657 dev_info(nfp->dev, "BAR0.1 RESERVED: PCIe XPB/MSI-X PBA\n");
658 atomic_inc(&bar->refcnt);
659 bars_free--;
660
661 nfp6000_bar_write(nfp, bar, barcfg_msix_xpb);
662
663
664 for (i = 0; i < 4; i++) {
665 int j;
666
667 if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) {
668 nfp->expl.group[i].bitsize = 0;
669 continue;
670 }
671
672 bar = &nfp->bar[4 + i];
673 bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
674 nfp_bar_resource_len(bar));
675 if (bar->iomem) {
676 dev_info(nfp->dev,
677 "BAR0.%d RESERVED: Explicit%d Mapping\n",
678 4 + i, i);
679 atomic_inc(&bar->refcnt);
680 bars_free--;
681
682 nfp->expl.group[i].bitsize = bar->bitsize;
683 nfp->expl.group[i].addr = bar->iomem;
684 nfp6000_bar_write(nfp, bar, barcfg_explicit[i]);
685
686 for (j = 0; j < 4; j++)
687 nfp->expl.group[i].free[j] = true;
688 }
689 nfp->iomem.expl[i] = bar->iomem;
690 }
691
692
693 sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
694 bar_cmp, NULL);
695
696 dev_info(nfp->dev, "%d NFP PCI2CPP BARs, %d free\n",
697 nfp->bars, bars_free);
698
699 return 0;
700}
701
702static void disable_bars(struct nfp6000_pcie *nfp)
703{
704 struct nfp_bar *bar = &nfp->bar[0];
705 int n;
706
707 for (n = 0; n < nfp->bars; n++, bar++) {
708 if (bar->iomem) {
709 iounmap(bar->iomem);
710 bar->iomem = NULL;
711 }
712 }
713}
714
715
716
717
718
/* struct nfp6000_area_priv - per-area state for the area interface
 * @refcnt:	number of acquisitions of this area
 * @bar:	BAR backing the area while acquired, NULL otherwise
 * @bar_offset:	offset of the area within the BAR window
 * @target:	CPP target ID
 * @action:	CPP action ID
 * @token:	CPP token ID
 * @offset:	CPP address of the area
 * @width:	push (read) / pull (write) widths and the BAR width used
 * @size:	size of the area in bytes
 * @iomem:	kernel mapping while acquired
 * @phys:	physical address of the area while acquired
 * @resource:	resource describing the acquired range
 */
struct nfp6000_area_priv {
	atomic_t refcnt;

	struct nfp_bar *bar;
	u32 bar_offset;

	u32 target;
	u32 action;
	u32 token;
	u64 offset;
	struct {
		int read;
		int write;
		int bar;
	} width;
	size_t size;

	void __iomem *iomem;
	phys_addr_t phys;
	struct resource resource;
};
740
/* Initialize an area's private state from the CPP ID and address range.
 *
 * Determines the push/pull widths for the target and rejects targets
 * whose read and write widths differ (the BAR must be programmed with a
 * single width).  Returns 0 on success or a negative errno.
 */
static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest,
			     unsigned long long address, unsigned long size)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	u32 target = NFP_CPP_ID_TARGET_of(dest);
	u32 action = NFP_CPP_ID_ACTION_of(dest);
	u32 token = NFP_CPP_ID_TOKEN_of(dest);
	int pp;

	pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address);
	if (pp < 0)
		return pp;

	priv->width.read = PUSH_WIDTH(pp);
	priv->width.write = PULL_WIDTH(pp);
	/* Mixed-width targets cannot be mapped through a single BAR */
	if (priv->width.read > 0 &&
	    priv->width.write > 0 &&
	    priv->width.read != priv->width.write) {
		return -EINVAL;
	}

	if (priv->width.read > 0)
		priv->width.bar = priv->width.read;
	else
		priv->width.bar = priv->width.write;

	atomic_set(&priv->refcnt, 0);
	priv->bar = NULL;

	priv->target = target;
	priv->action = action;
	priv->token = token;
	priv->offset = address;
	priv->size = size;
	memset(&priv->resource, 0, sizeof(priv->resource));

	return 0;
}
779
/* Nothing to tear down - all area state is released in area_release(). */
static void nfp6000_area_cleanup(struct nfp_cpp_area *area)
{
}
783
/* Take a reference on the area's acquisition count. */
static void priv_area_get(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	atomic_inc(&priv->refcnt);
}
790
/* Drop a reference on the area; returns non-zero when the last
 * reference was released (i.e. the caller should tear down the
 * mapping).  Warns on refcount underflow.
 */
static int priv_area_put(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	if (WARN_ON(!atomic_read(&priv->refcnt)))
		return 0;

	return atomic_dec_and_test(&priv->refcnt);
}
800
/* Acquire an area: allocate (or share) a BAR covering it and establish
 * a kernel mapping.  Acquisitions nest; only the first one does the
 * BAR allocation.  Returns 0 on success or a negative errno.
 */
static int nfp6000_area_acquire(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	int barnum, err;

	if (priv->bar) {
		/* Already allocated. */
		priv_area_get(area);
		return 0;
	}

	/* Non-blocking allocation (last argument = 1) */
	barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token,
			       priv->offset, priv->size, priv->width.bar, 1);

	if (barnum < 0) {
		err = barnum;
		goto err_alloc_bar;
	}
	priv->bar = &nfp->bar[barnum];

	/* General maps sub-divide the window by target and token, so the
	 * in-window offset must include those sub-window offsets.
	 */
	if (nfp_bar_maptype(priv->bar) ==
	    NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) {
		priv->bar_offset = priv->offset &
			(NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(
			priv->bar, priv->target);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(
			priv->bar, priv->token);
	} else {
		priv->bar_offset = priv->offset & priv->bar->mask;
	}

	/* Record the physical span the area occupies within the BAR */
	priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset;
	priv->resource.name = nfp_cpp_area_name(area);
	priv->resource.start = priv->phys;
	priv->resource.end = priv->resource.start + priv->size - 1;
	priv->resource.flags = IORESOURCE_MEM;

	/* Reuse the BAR's mapping if it has one, otherwise map just the
	 * area's own physical range.
	 */
	if (priv->bar->iomem)
		priv->iomem = priv->bar->iomem + priv->bar_offset;
	else

		priv->iomem = ioremap_nocache(priv->phys, priv->size);

	if (IS_ERR_OR_NULL(priv->iomem)) {
		dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
			(int)priv->size, priv->bar->index);
		err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem);
		priv->iomem = NULL;
		goto err_iomem_remap;
	}

	priv_area_get(area);
	return 0;

err_iomem_remap:
	nfp_bar_put(nfp, priv->bar);
	priv->bar = NULL;
err_alloc_bar:
	return err;
}
870
/* Release an area acquisition; the last release unmaps the area and
 * returns the BAR reference.
 */
static void nfp6000_area_release(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	if (!priv_area_put(area))
		return;

	/* Only unmap if we created a private mapping in area_acquire();
	 * shared BAR mappings are owned by the BAR itself.
	 */
	if (!priv->bar->iomem)
		iounmap(priv->iomem);

	nfp_bar_put(nfp, priv->bar);

	priv->bar = NULL;
	priv->iomem = NULL;
}
887
/* Physical address of the (acquired) area. */
static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->phys;
}
894
/* Kernel mapping of the (acquired) area; NULL if not acquired. */
static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->iomem;
}
901
/* Resource describing the area's backing store.
 *
 * Note: returns the BAR's whole PCI resource, not priv->resource
 * (the area-sized range) - callers get the enclosing window.
 */
static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->bar->resource;
}
912
/* Read @length bytes at @offset within the area into @kernel_vaddr.
 *
 * Aligned accesses go straight through the BAR mapping at the target's
 * push width; unaligned accesses fall back to an explicit transaction.
 * Returns the number of bytes read or a negative errno.
 */
static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
			     unsigned long offset, unsigned int length)
{
	u64 __maybe_unused *wrptr64 = kernel_vaddr;
	const u64 __iomem __maybe_unused *rdptr64;
	struct nfp6000_area_priv *priv;
	u32 *wrptr32 = kernel_vaddr;
	const u32 __iomem *rdptr32;
	int n, width;
	bool is_64;

	priv = nfp_cpp_area_priv(area);
	rdptr64 = priv->iomem + offset;
	rdptr32 = priv->iomem + offset;

	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.read;

	if (width <= 0)
		return -EINVAL;

	/* Unaligned? Use an explicit transaction instead of MMIO */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
					     NFP_CPP_ID(priv->target,
							priv->action,
							priv->token),
					     priv->offset + offset,
					     kernel_vaddr, length, width);

	is_64 = width == TARGET_WIDTH_64;

	/* MU reads via the RW action can use 32-bit accesses */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW)
		is_64 = false;

	if (is_64) {
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;
	} else {
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;
	}

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	/* 64-bit MMIO only when the arch provides __raw_readq */
	if (is_64)
#ifndef __raw_readq
		return -EINVAL;
#else
		for (n = 0; n < length; n += sizeof(u64))
			*wrptr64++ = __raw_readq(rdptr64++);
#endif
	else
		for (n = 0; n < length; n += sizeof(u32))
			*wrptr32++ = __raw_readl(rdptr32++);

	return n;
}
976
/* Write @length bytes from @kernel_vaddr to @offset within the area.
 *
 * Mirrors nfp6000_area_read(): aligned accesses go through the BAR
 * mapping at the target's pull width (with a wmb() after each word to
 * order the MMIO stores); unaligned accesses use an explicit
 * transaction.  Returns the number of bytes written or a negative
 * errno.
 */
static int
nfp6000_area_write(struct nfp_cpp_area *area,
		   const void *kernel_vaddr,
		   unsigned long offset, unsigned int length)
{
	const u64 __maybe_unused *rdptr64 = kernel_vaddr;
	u64 __iomem __maybe_unused *wrptr64;
	const u32 *rdptr32 = kernel_vaddr;
	struct nfp6000_area_priv *priv;
	u32 __iomem *wrptr32;
	int n, width;
	bool is_64;

	priv = nfp_cpp_area_priv(area);
	wrptr64 = priv->iomem + offset;
	wrptr32 = priv->iomem + offset;

	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.write;

	if (width <= 0)
		return -EINVAL;

	/* Unaligned? Use an explicit transaction instead of MMIO */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
					      NFP_CPP_ID(priv->target,
							 priv->action,
							 priv->token),
					      priv->offset + offset,
					      kernel_vaddr, length, width);

	is_64 = width == TARGET_WIDTH_64;

	/* MU writes via the RW action can use 32-bit accesses */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW)
		is_64 = false;

	if (is_64) {
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;
	} else {
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;
	}

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	/* 64-bit MMIO only when the arch provides __raw_writeq */
	if (is_64)
#ifndef __raw_writeq
		return -EINVAL;
#else
		for (n = 0; n < length; n += sizeof(u64)) {
			__raw_writeq(*rdptr64++, wrptr64++);
			wmb();
		}
#endif
	else
		for (n = 0; n < length; n += sizeof(u32)) {
			__raw_writel(*rdptr32++, wrptr32++);
			wmb();
		}

	return n;
}
1046
/* struct nfp6000_explicit_priv - state of one explicit transaction slot
 * @nfp:	owning device
 * @bar:	(group, area) pair this slot occupies
 * @bitsize:	log2 of the per-area address window
 * @data:	128-byte data buffer in the general-mapping SRAM
 * @addr:	MMIO window used to kick off the transaction
 */
struct nfp6000_explicit_priv {
	struct nfp6000_pcie *nfp;
	struct {
		int group;
		int area;
	} bar;
	int bitsize;
	void __iomem *data;
	void __iomem *addr;
};
1057
/* Claim a free (group, area) explicit transaction slot.
 *
 * Scans the groups set up by enable_bars() under expl.mutex; returns 0
 * with the slot recorded in priv, or -EAGAIN if all slots are busy.
 */
static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl));
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	int i, j;

	mutex_lock(&nfp->expl.mutex);
	for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) {
		/* bitsize == 0 marks a group with no BAR behind it */
		if (!nfp->expl.group[i].bitsize)
			continue;

		for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) {
			u16 data_offset;

			if (!nfp->expl.group[i].free[j])
				continue;

			priv->nfp = nfp;
			priv->bar.group = i;
			priv->bar.area = j;
			priv->bitsize = nfp->expl.group[i].bitsize - 2;

			/* 128-byte data buffer per (group, area) slot */
			data_offset = (priv->bar.group << 9) +
				(priv->bar.area << 7);
			priv->data = nfp->expl.data + data_offset;
			priv->addr = nfp->expl.group[i].addr +
				(priv->bar.area << priv->bitsize);
			nfp->expl.group[i].free[j] = false;

			mutex_unlock(&nfp->expl.mutex);
			return 0;
		}
	}
	mutex_unlock(&nfp->expl.mutex);

	return -EAGAIN;
}
1095
1096static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl)
1097{
1098 struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
1099 struct nfp6000_pcie *nfp = priv->nfp;
1100
1101 mutex_lock(&nfp->expl.mutex);
1102 nfp->expl.group[priv->bar.group].free[priv->bar.area] = true;
1103 mutex_unlock(&nfp->expl.mutex);
1104}
1105
/* Copy @len bytes of pull data into the slot's SRAM data buffer, one
 * 32-bit word at a time.  Returns the number of bytes staged.
 */
static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl,
				const void *buff, size_t len)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	const u32 *src = buff;
	size_t i;

	for (i = 0; i < len; i += sizeof(u32))
		writel(*(src++), priv->data + i);

	return i;
}
1118
/* Program and fire an explicit CPP transaction.
 *
 * Fills in the three explicit BAR CSR words from @cmd (substituting
 * this host's master ID and per-slot signal/data references when the
 * command leaves them unset), writes them via the CSR window or PCI
 * config space, then triggers the transaction with a read through the
 * slot's address window.  Returns the mask of signals the caller
 * should wait for.
 */
static int
nfp6000_explicit_do(struct nfp_cpp_explicit *expl,
		    const struct nfp_cpp_explicit_command *cmd, u64 address)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u8 signal_master, signal_ref, data_master;
	struct nfp6000_pcie *nfp = priv->nfp;
	int sigmask = 0;
	u16 data_ref;
	u32 csr[3];

	if (cmd->siga_mode)
		sigmask |= 1 << cmd->siga;
	if (cmd->sigb_mode)
		sigmask |= 1 << cmd->sigb;

	/* Default the signal master to ourselves */
	signal_master = cmd->signal_master;
	if (!signal_master)
		signal_master = nfp->expl.master_id;

	/* When we are the signal master, use this slot's signal ref */
	signal_ref = cmd->signal_ref;
	if (signal_master == nfp->expl.master_id)
		signal_ref = nfp->expl.signal_ref +
			((priv->bar.group * 4 + priv->bar.area) << 1);

	/* Default the data master to ourselves */
	data_master = cmd->data_master;
	if (!data_master)
		data_master = nfp->expl.master_id;

	/* When we are the data master, point at this slot's SRAM buffer */
	data_ref = cmd->data_ref;
	if (data_master == nfp->expl.master_id)
		data_ref = 0x1000 +
			(priv->bar.group << 9) + (priv->bar.area << 7);

	csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Token(
			NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16);

	csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref);

	csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target(
			NFP_CPP_ID_TARGET_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Action(
			NFP_CPP_ID_ACTION_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master);

	if (nfp->iomem.csr) {
		writel(csr[0], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						  priv->bar.area));
		writel(csr[1], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						  priv->bar.area));
		writel(csr[2], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						  priv->bar.area));
		/* Readbacks flush the posted writes before the trigger */
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						 priv->bar.area));
	} else {
		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR0(
					       priv->bar.group, priv->bar.area),
				       csr[0]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR1(
					       priv->bar.group, priv->bar.area),
				       csr[1]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR2(
					       priv->bar.group, priv->bar.area),
				       csr[2]);
	}

	/* The read through the explicit window triggers the transaction */
	readb(priv->addr + (address & ((1 << priv->bitsize) - 1)));

	return sigmask;
}
1212
/* Copy @len bytes of push data out of the slot's SRAM data buffer, one
 * 32-bit word at a time.  Returns the number of bytes copied.
 */
static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl,
				void *buff, size_t len)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u32 *dst = buff;
	size_t i;

	for (i = 0; i < len; i += sizeof(u32))
		*(dst++) = readl(priv->data + i);

	return i;
}
1225
/* Post-registration init: pre-populate the CPP area cache. */
static int nfp6000_init(struct nfp_cpp *cpp)
{
	nfp_cpp_area_cache_add(cpp, SZ_64K);
	nfp_cpp_area_cache_add(cpp, SZ_64K);
	nfp_cpp_area_cache_add(cpp, SZ_256K);

	return 0;
}
1234
/* Tear down the transport: unmap all BARs and free the device state. */
static void nfp6000_free(struct nfp_cpp *cpp)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(cpp);

	disable_bars(nfp);
	kfree(nfp);
}
1242
/* Read the board serial number from the PCIe Device Serial Number
 * extended capability: bytes 0-3 come from DSN dword 2 (big-endian),
 * bytes 4-5 from the upper half of DSN dword 1.  Zeroes the buffer if
 * the capability is absent.
 */
static void nfp6000_read_serial(struct device *dev, u8 *serial)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int pos;
	u32 reg;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!pos) {
		memset(serial, 0, NFP_SERIAL_LEN);
		return;
	}

	pci_read_config_dword(pdev, pos + 4, &reg);
	put_unaligned_be16(reg >> 16, serial + 4);
	pci_read_config_dword(pdev, pos + 8, &reg);
	put_unaligned_be32(reg, serial);
}
1260
/* CPP interface ID: the low 16 bits of DSN dword 1, or a generic PCI
 * interface ID if the DSN capability is absent.
 */
static u16 nfp6000_get_interface(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int pos;
	u32 reg;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);

	pci_read_config_dword(pdev, pos + 4, &reg);

	return reg & 0xffff;
}
1275
/* CPP transport operations implemented by this PCIe driver */
static const struct nfp_cpp_operations nfp6000_pcie_ops = {
	.owner			= THIS_MODULE,

	.init			= nfp6000_init,
	.free			= nfp6000_free,

	.read_serial		= nfp6000_read_serial,
	.get_interface		= nfp6000_get_interface,

	.area_priv_size		= sizeof(struct nfp6000_area_priv),
	.area_init		= nfp6000_area_init,
	.area_cleanup		= nfp6000_area_cleanup,
	.area_acquire		= nfp6000_area_acquire,
	.area_release		= nfp6000_area_release,
	.area_phys		= nfp6000_area_phys,
	.area_iomem		= nfp6000_area_iomem,
	.area_resource		= nfp6000_area_resource,
	.area_read		= nfp6000_area_read,
	.area_write		= nfp6000_area_write,

	.explicit_priv_size	= sizeof(struct nfp6000_explicit_priv),
	.explicit_acquire	= nfp6000_explicit_acquire,
	.explicit_release	= nfp6000_explicit_release,
	.explicit_put		= nfp6000_explicit_put,
	.explicit_do		= nfp6000_explicit_do,
	.explicit_get		= nfp6000_explicit_get,
};
1303
1304
1305
1306
1307
1308
1309
/* nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP handle from a PCI device
 * @pdev:	NFP4000/NFP6000 PCI device
 *
 * Validates the device's CPP interface ID, sets up the BARs and
 * registers the transport operations.  Return: a CPP handle, or an
 * ERR_PTR() on failure.
 */
struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
{
	struct nfp6000_pcie *nfp;
	u16 interface;
	int err;

	/*  Finished with card initialization. */
	dev_info(&pdev->dev,
		 "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp) {
		err = -ENOMEM;
		goto err_ret;
	}

	nfp->dev = &pdev->dev;
	nfp->pdev = pdev;
	init_waitqueue_head(&nfp->bar_waiters);
	spin_lock_init(&nfp->bar_lock);

	interface = nfp6000_get_interface(&pdev->dev);

	/* The device must identify itself as a PCI interface ... */
	if (NFP_CPP_INTERFACE_TYPE_of(interface) !=
	    NFP_CPP_INTERFACE_TYPE_PCI) {
		dev_err(&pdev->dev,
			"Interface type %d is not the expected %d\n",
			NFP_CPP_INTERFACE_TYPE_of(interface),
			NFP_CPP_INTERFACE_TYPE_PCI);
		err = -ENODEV;
		goto err_free_nfp;
	}

	/* ... on the per-opener channel */
	if (NFP_CPP_INTERFACE_CHANNEL_of(interface) !=
	    NFP_CPP_INTERFACE_CHANNEL_PEROPENER) {
		dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n",
			NFP_CPP_INTERFACE_CHANNEL_of(interface),
			NFP_CPP_INTERFACE_CHANNEL_PEROPENER);
		err = -ENODEV;
		goto err_free_nfp;
	}

	err = enable_bars(nfp, interface);
	if (err)
		goto err_free_nfp;

	/* Probe for all the common NFP devices */
	return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp);

err_free_nfp:
	kfree(nfp);
err_ret:
	dev_err(&pdev->dev, "NFP6000 PCI setup failed\n");
	return ERR_PTR(err);
}
1365