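/*
 * nfp6000_pcie.c
 *
 * Multiplexes the NFP BARs between NFP internal resources and
 * implements the PCIe specific interface for generic CPP bus access.
 *
 * The BARs are managed with refcounts and are allocated/acquired
 * using target, token and offset/size matching.  The generic CPP bus
 * abstraction builds upon this BAR interface.
 */
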
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sort.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/pci.h>

#include "nfp_cpp.h"

#include "nfp6000/nfp6000.h"

#include "nfp6000_pcie.h"

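/* PCIe-to-CPP BAR configuration and explicit transaction CSR definitions */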
#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0)
#define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \
	(0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x) (((_x) & 0x3) << 30)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x) (((_x) >> 30) & 0x3)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x) (((_x) & 0x3) << 28)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x) (((_x) >> 28) & 0x3)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x) (((_x) & 0xffffff) << 0)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x) (((_x) >> 0) & 0xffffff)
#define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \
	(0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x) (((_x) & 0x7f) << 24)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x) (((_x) >> 24) & 0x7f)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x) (((_x) & 0x3ff) << 14)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x) (((_x) >> 14) & 0x3ff)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x) (((_x) & 0x3fff) << 0)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x) (((_x) >> 0) & 0x3fff)
#define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \
	(0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x) (((_x) & 0xf) << 28)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x) (((_x) >> 28) & 0xf)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x) (((_x) & 0x1f) << 23)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x) (((_x) >> 23) & 0x1f)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x) (((_x) & 0x1f) << 18)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x) (((_x) >> 18) & 0x1f)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x) (((_x) & 0xff) << 10)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x) (((_x) >> 10) & 0xff)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x) (((_x) & 0x3ff) << 0)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff)

#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x) (((_x) & 0x1f) << 16)
#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f)
#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x) (((_x) & 0xffff) << 0)
#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x) (((_x) >> 0) & 0xffff)
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x) (((_x) & 0x3) << 27)
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x) (((_x) >> 27) & 0x3)
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT 0
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT 1
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE 3
#define NFP_PCIE_BAR_PCIE2CPP_MapType(_x) (((_x) & 0x7) << 29)
#define NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x) (((_x) >> 29) & 0x7)
#define NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED 0
#define NFP_PCIE_BAR_PCIE2CPP_MapType_BULK 1
#define NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET 2
#define NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL 3
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0 4
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1 5
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2 6
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3 7
#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x) (((_x) & 0xf) << 23)
#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf)
#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x) (((_x) & 0x3) << 21)
#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3)
#define NFP_PCIE_EM 0x020000
#define NFP_PCIE_SRAM 0x000000

/* Minimum size of the BAR0.0 window required for the general mapping
 * (MSI-X SRAM, explicit data area, BAR config CSRs and event manager).
 */
#define NFP_PCI_MIN_MAP_SIZE 0x080000

#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))

#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
	(0x400 + ((bar) * 8 + (slot)) * 4)

#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
	(((bar) * 8 + (slot)) * 4)

/* The number of explicit BARs to reserve.
 * Minimum is 0, maximum is 4 on the NFP6000.
 */
#define NFP_PCIE_EXPLICIT_BARS 2

struct nfp6000_pcie;
struct nfp6000_area_priv;

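/**
 * struct nfp_bar - describes BAR configuration and usage
 * @nfp:	backlink to owner
 * @barcfg:	cached contents of the BAR config CSR
 * @base:	the BAR's base CPP offset
 * @mask:	mask for the BAR aperture (read only)
 * @bitsize:	bitsize of the BAR aperture (read only)
 * @index:	index of the BAR
 * @refcnt:	number of current users
 * @iomem:	mapped IO memory
 * @resource:	iomem resource window
 */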
struct nfp_bar {
	struct nfp6000_pcie *nfp;
	u32 barcfg;
	u64 base;
	u64 mask;
	u32 bitsize;
	int index;
	atomic_t refcnt;

	void __iomem *iomem;
	struct resource *resource;
};

#define NFP_PCI_BAR_MAX (PCI_64BIT_BAR_COUNT * 8)

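/* Per-device state for one NFP6000/NFP4000 PCIe endpoint: expansion
 * BAR bookkeeping, the reserved CSR/event-manager mappings and the
 * explicit transaction groups.
 */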
struct nfp6000_pcie {
	struct pci_dev *pdev;
	struct device *dev;

	/* PCI BAR management */
	spinlock_t bar_lock;
	int bars;
	struct nfp_bar bar[NFP_PCI_BAR_MAX];
	wait_queue_head_t bar_waiters;

	/* Reserved BAR access */
	struct {
		void __iomem *csr;
		void __iomem *em;
		void __iomem *expl[4];
	} iomem;

	/* Explicit IO access */
	struct {
		struct mutex mutex;
		u8 master_id;
		u8 signal_ref;
		void __iomem *data;
		struct {
			void __iomem *addr;
			int bitsize;
			int free[4];
		} group[4];
	} expl;
};

static u32 nfp_bar_maptype(struct nfp_bar *bar)
{
	return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
}

/* Each 64-bit PCI BAR is divided into eight equally sized expansion
 * BARs ("slots"); a slot's length and start are derived from the
 * parent PCI BAR.
 */
static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar)
{
	return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8;
}

static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
{
	return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2)
		+ nfp_bar_resource_len(bar) * (bar->index & 7);
}

#define TARGET_WIDTH_32 4
#define TARGET_WIDTH_64 8

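/* Derive the BAR configuration word (and resulting CPP base) for a
 * target/action/token window.  A specific action (not NFP_CPP_ACTION_RW
 * and not 0) requires a FIXED mapping; otherwise a BULK mapping is used.
 * Returns -EINVAL if the request cannot be satisfied by this BAR.
 */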
static int
compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
	    u32 *bar_config, u64 *bar_base,
	    int tgt, int act, int tok, u64 offset, size_t size, int width)
{
	int bitsize;
	u32 newcfg;

	if (tgt >= NFP_CPP_NUM_TARGETS)
		return -EINVAL;

	switch (width) {
	case 8:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT);
		break;
	case 4:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
		break;
	case 0:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE);
		break;
	default:
		return -EINVAL;
	}

	if (act != NFP_CPP_ACTION_RW && act != 0) {
		/* Fixed CPP mapping with a specific action */
		u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);

		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;
		offset &= mask;

		bitsize = 40 - 16;
	} else {
		u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);

		/* Bulk mapping */
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;

		offset &= mask;

		bitsize = 40 - 21;
	}

	if (bar->bitsize < bitsize)
		return -EINVAL;

	newcfg |= offset >> bitsize;

	if (bar_base)
		*bar_base = offset;

	if (bar_config)
		*bar_config = newcfg;

	return 0;
}

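/* Write a BAR configuration word, either through the memory-mapped
 * CSRs (when BAR0.0 is mapped) or via PCI config space.
 */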
static int
nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
{
	int base, slot;
	int xbar;

	base = bar->index >> 3;
	slot = bar->index & 7;

	if (nfp->iomem.csr) {
		xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
		writel(newcfg, nfp->iomem.csr + xbar);
		/* Readback to ensure the BAR update is flushed */
		readl(nfp->iomem.csr + xbar);
	} else {
		xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
		pci_write_config_dword(nfp->pdev, xbar, newcfg);
	}

	bar->barcfg = newcfg;

	return 0;
}

static int
reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
		int tgt, int act, int tok, u64 offset, size_t size, int width)
{
	u64 newbase;
	u32 newcfg;
	int err;

	err = compute_bar(nfp, bar, &newcfg, &newbase,
			  tgt, act, tok, offset, size, width);
	if (err)
		return err;

	bar->base = newbase;

	return nfp6000_bar_write(nfp, bar, newcfg);
}

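/* Check if a BAR, as currently configured, can already be used for the
 * requested target/action/token/offset/size/width access.
 */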
static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
			u64 offset, size_t size, int width)
{
	int bartgt, baract, bartok;
	int barwidth;
	u32 maptype;

	maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
	bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg);
	bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg);
	baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg);

	barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg);
	switch (barwidth) {
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT:
		barwidth = 4;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT:
		barwidth = 8;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE:
		barwidth = 0;
		break;
	default:
		barwidth = -1;
		break;
	}

	switch (maptype) {
	case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
		bartok = -1;
		/* FALLTHROUGH */
	case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
		baract = NFP_CPP_ACTION_RW;
		if (act == 0)
			act = NFP_CPP_ACTION_RW;
		/* FALLTHROUGH */
	case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
		break;
	default:
		/* We don't match explicit BARs through the area interface */
		return 0;
	}

	/* Make sure to match up the width */
	if (barwidth != width)
		return 0;

	if ((bartgt < 0 || bartgt == tgt) &&
	    (bartok < 0 || bartok == tok) &&
	    (baract == act) &&
	    bar->base <= offset &&
	    (bar->base + (1 << bar->bitsize)) >= (offset + size))
		return 1;

	/* No match */
	return 0;
}

static int
find_matching_bar(struct nfp6000_pcie *nfp,
		  u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
{
	int n;

	for (n = 0; n < nfp->bars; n++) {
		struct nfp_bar *bar = &nfp->bar[n];

		if (matching_bar(bar, tgt, act, tok, offset, size, width))
			return n;
	}

	return -1;
}

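/* Return the index of an unused BAR that could be reprogrammed to fit
 * the request, -EAGAIN if all suitable BARs are currently busy, or
 * -EINVAL if no BAR could ever satisfy the request.
 */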
static int
find_unused_bar_noblock(const struct nfp6000_pcie *nfp,
			int tgt, int act, int tok,
			u64 offset, size_t size, int width)
{
	int n, busy = 0;

	for (n = 0; n < nfp->bars; n++) {
		const struct nfp_bar *bar = &nfp->bar[n];
		int err;

		if (!bar->bitsize)
			continue;

		/* Just check to see if we can make it fit... */
		err = compute_bar(nfp, bar, NULL, NULL,
				  tgt, act, tok, offset, size, width);
		if (err)
			continue;

		if (!atomic_read(&bar->refcnt))
			return n;

		busy++;
	}

	if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n",
		 tgt, act, tok, offset, size, width))
		return -EINVAL;

	return -EAGAIN;
}

/* Returns the index of a free BAR with bar_lock held on success; on
 * failure the lock is dropped and a negative errno is returned.
 */
static int
find_unused_bar_and_lock(struct nfp6000_pcie *nfp,
			 int tgt, int act, int tok,
			 u64 offset, size_t size, int width)
{
	unsigned long flags;
	int n;

	spin_lock_irqsave(&nfp->bar_lock, flags);

	n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width);
	if (n < 0)
		spin_unlock_irqrestore(&nfp->bar_lock, flags);
	else
		__release(&nfp->bar_lock);

	return n;
}

static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	atomic_inc(&bar->refcnt);
}

static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	if (atomic_dec_and_test(&bar->refcnt))
		wake_up_interruptible(&nfp->bar_waiters);
}

static int
nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum,
		 u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
{
	return wait_event_interruptible(nfp->bar_waiters,
					(*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok,
									    offset, size, width))
					!= -EAGAIN);
}

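/* Allocate a BAR covering the requested CPP window: reuse a matching
 * BAR if one exists, otherwise grab and reprogram an unused one,
 * optionally sleeping until a BAR frees up.  Takes a reference on the
 * BAR and returns its index, or a negative errno.
 */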
static int
nfp_alloc_bar(struct nfp6000_pcie *nfp,
	      u32 tgt, u32 act, u32 tok,
	      u64 offset, size_t size, int width, int nonblocking)
{
	unsigned long irqflags;
	int barnum, retval;

	if (size > (1 << 24))
		return -EINVAL;

	spin_lock_irqsave(&nfp->bar_lock, irqflags);
	barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width);
	if (barnum >= 0) {
		/* Found a perfect match. */
		nfp_bar_get(nfp, &nfp->bar[barnum]);
		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
		return barnum;
	}

	barnum = find_unused_bar_noblock(nfp, tgt, act, tok,
					 offset, size, width);
	if (barnum < 0) {
		if (nonblocking)
			goto err_nobar;

		/* Wait until a BAR becomes available.  The
		 * find_unused_bar_and_lock() helper re-takes the
		 * bar_lock if a free BAR is found.
		 */
		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
		retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok,
					  offset, size, width);
		if (retval)
			return retval;
		__acquire(&nfp->bar_lock);
	}

	nfp_bar_get(nfp, &nfp->bar[barnum]);
	retval = reconfigure_bar(nfp, &nfp->bar[barnum],
				 tgt, act, tok, offset, size, width);
	if (retval < 0) {
		nfp_bar_put(nfp, &nfp->bar[barnum]);
		barnum = retval;
	}

err_nobar:
	spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
	return barnum;
}

static void disable_bars(struct nfp6000_pcie *nfp);

static int bar_cmp(const void *aptr, const void *bptr)
{
	const struct nfp_bar *a = aptr, *b = bptr;

	if (a->bitsize == b->bitsize)
		return a->index - b->index;
	else
		return a->bitsize - b->bitsize;
}

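/* Map all PCI BARs and fetch the actual BAR configurations from the
 * board.  We assume that the BAR with the PCIe config block is
 * already mapped.
 *
 * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
 * BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA)
 * BAR0.2: --
 * BAR0.3: --
 * BAR0.4: Reserved for Explicit 0.0-0.3 access
 * BAR0.5: Reserved for Explicit 1.0-1.3 access
 * BAR0.6: --
 * BAR0.7: --
 *
 * BAR1.0-BAR1.7: --
 * BAR2.0-BAR2.7: --
 */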
static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
{
	const u32 barcfg_msix_general =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
	const u32 barcfg_msix_xpb =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
		NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
			NFP_CPP_TARGET_ISLAND_XPB);
	const u32 barcfg_explicit[4] = {
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
	};
	char status_msg[196] = {};
	struct nfp_bar *bar;
	int i, bars_free;
	int expl_groups;
	char *msg, *end;

	msg = status_msg +
		snprintf(status_msg, sizeof(status_msg) - 1, "RESERVED BARs: ");
	end = status_msg + sizeof(status_msg) - 1;

	bar = &nfp->bar[0];
	for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
		struct resource *res;

		res = &nfp->pdev->resource[(i >> 3) * 2];

		/* Skip over BARs that are not IORESOURCE_MEM */
		if (!(resource_type(res) & IORESOURCE_MEM)) {
			bar--;
			continue;
		}

		bar->resource = res;
		bar->barcfg = 0;

		bar->nfp = nfp;
		bar->index = i;
		bar->mask = nfp_bar_resource_len(bar) - 1;
		bar->bitsize = fls(bar->mask);
		bar->base = 0;
		bar->iomem = NULL;
	}

	nfp->bars = bar - &nfp->bar[0];
	if (nfp->bars < 8) {
		dev_err(nfp->dev, "No usable BARs found!\n");
		return -EINVAL;
	}

	bars_free = nfp->bars;

	mutex_init(&nfp->expl.mutex);

	/* Convert the per-PCIe unit ID (0..3) to an explicit data/signal
	 * master ID (0x40, 0x50, 0x60 or 0x70).
	 */
	nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4)
		<< 4;
	nfp->expl.signal_ref = 0x10;

	/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
	bar = &nfp->bar[0];
	if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
		bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
					     nfp_bar_resource_len(bar));
	if (bar->iomem) {
		msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
		atomic_inc(&bar->refcnt);
		bars_free--;

		nfp6000_bar_write(nfp, bar, barcfg_msix_general);

		nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;

		if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
		    nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) {
			nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
		} else {
			int pf = nfp->pdev->devfn & 7;

			nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
		}
		nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
	}

	if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
	    nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000)
		expl_groups = 4;
	else
		expl_groups = 1;

	/* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
	bar = &nfp->bar[1];
	msg += snprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
	atomic_inc(&bar->refcnt);
	bars_free--;

	nfp6000_bar_write(nfp, bar, barcfg_msix_xpb);

	/* Use BAR0.4..BAR0.7 for explicit IO */
	for (i = 0; i < 4; i++) {
		int j;

		if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) {
			nfp->expl.group[i].bitsize = 0;
			continue;
		}

		bar = &nfp->bar[4 + i];
		bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
					     nfp_bar_resource_len(bar));
		if (bar->iomem) {
			msg += snprintf(msg, end - msg,
					"0.%d: Explicit%d, ", 4 + i, i);
			atomic_inc(&bar->refcnt);
			bars_free--;

			nfp->expl.group[i].bitsize = bar->bitsize;
			nfp->expl.group[i].addr = bar->iomem;
			nfp6000_bar_write(nfp, bar, barcfg_explicit[i]);

			for (j = 0; j < 4; j++)
				nfp->expl.group[i].free[j] = true;
		}
		nfp->iomem.expl[i] = bar->iomem;
	}

	/* Sort BARs by bit size - use the smallest possible first. */
	sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
	     bar_cmp, NULL);

	dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars);

	return 0;
}

static void disable_bars(struct nfp6000_pcie *nfp)
{
	struct nfp_bar *bar = &nfp->bar[0];
	int n;

	for (n = 0; n < nfp->bars; n++, bar++) {
		if (bar->iomem) {
			iounmap(bar->iomem);
			bar->iomem = NULL;
		}
	}
}

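/*
 * Generic CPP bus access interface.
 */

/* Per-area state: the CPP target/action/token/offset/size of the
 * window, the backing BAR and the offset/mapping within that BAR.
 */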
struct nfp6000_area_priv {
	atomic_t refcnt;

	struct nfp_bar *bar;
	u32 bar_offset;

	u32 target;
	u32 action;
	u32 token;
	u64 offset;
	struct {
		int read;
		int write;
		int bar;
	} width;
	size_t size;

	void __iomem *iomem;
	phys_addr_t phys;
	struct resource resource;
};

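/* Initialize a CPP area: decode target/action/token from the CPP ID,
 * look up the push/pull widths for the target and record the window.
 */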
static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest,
			     unsigned long long address, unsigned long size)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	u32 target = NFP_CPP_ID_TARGET_of(dest);
	u32 action = NFP_CPP_ID_ACTION_of(dest);
	u32 token = NFP_CPP_ID_TOKEN_of(dest);
	int pp;

	pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address);
	if (pp < 0)
		return pp;

	priv->width.read = PUSH_WIDTH(pp);
	priv->width.write = PULL_WIDTH(pp);
	if (priv->width.read > 0 &&
	    priv->width.write > 0 &&
	    priv->width.read != priv->width.write) {
		return -EINVAL;
	}

	if (priv->width.read > 0)
		priv->width.bar = priv->width.read;
	else
		priv->width.bar = priv->width.write;

	atomic_set(&priv->refcnt, 0);
	priv->bar = NULL;

	priv->target = target;
	priv->action = action;
	priv->token = token;
	priv->offset = address;
	priv->size = size;
	memset(&priv->resource, 0, sizeof(priv->resource));

	return 0;
}

static void nfp6000_area_cleanup(struct nfp_cpp_area *area)
{
}

static void priv_area_get(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	atomic_inc(&priv->refcnt);
}

static int priv_area_put(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	if (WARN_ON(!atomic_read(&priv->refcnt)))
		return 0;

	return atomic_dec_and_test(&priv->refcnt);
}

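/* Acquire a CPP area: allocate (or share) a BAR that covers the area
 * and set up the iomem pointer and resource describing the mapping.
 */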
static int nfp6000_area_acquire(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	int barnum, err;

	if (priv->bar) {
		/* Already allocated. */
		priv_area_get(area);
		return 0;
	}

	barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token,
			       priv->offset, priv->size, priv->width.bar, 1);

	if (barnum < 0) {
		err = barnum;
		goto err_alloc_bar;
	}
	priv->bar = &nfp->bar[barnum];

	/* Calculate offset into BAR. */
	if (nfp_bar_maptype(priv->bar) ==
	    NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) {
		priv->bar_offset = priv->offset &
			(NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(
			priv->bar, priv->target);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(
			priv->bar, priv->token);
	} else {
		priv->bar_offset = priv->offset & priv->bar->mask;
	}

	/* We don't actually try to acquire the resource area using
	 * request_resource().  This would prevent sharing the mapped
	 * BAR between multiple CPP areas and prevent us from
	 * effectively utilizing the limited amount of BAR resources.
	 */
	priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset;
	priv->resource.name = nfp_cpp_area_name(area);
	priv->resource.start = priv->phys;
	priv->resource.end = priv->resource.start + priv->size - 1;
	priv->resource.flags = IORESOURCE_MEM;

	/* If the BAR is already mapped in, use its mapping */
	if (priv->bar->iomem)
		priv->iomem = priv->bar->iomem + priv->bar_offset;
	else
		/* Must have been too big. Sub-allocate. */
		priv->iomem = ioremap_nocache(priv->phys, priv->size);

	if (IS_ERR_OR_NULL(priv->iomem)) {
		dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
			(int)priv->size, priv->bar->index);
		err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem);
		priv->iomem = NULL;
		goto err_iomem_remap;
	}

	priv_area_get(area);
	return 0;

err_iomem_remap:
	nfp_bar_put(nfp, priv->bar);
	priv->bar = NULL;
err_alloc_bar:
	return err;
}

static void nfp6000_area_release(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	if (!priv_area_put(area))
		return;

	if (!priv->bar->iomem)
		iounmap(priv->iomem);

	nfp_bar_put(nfp, priv->bar);

	priv->bar = NULL;
	priv->iomem = NULL;
}

static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->phys;
}

static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->iomem;
}

static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area)
{
	/* Use the BAR resource as the resource for the CPP area.
	 * This enables us to share the BAR among multiple CPP areas
	 * without resource conflicts.
	 */
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->bar->resource;
}

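/* Read from a CPP area through its BAR mapping.  Unaligned requests
 * fall back to an explicit transaction; aligned requests use 32-bit or
 * 64-bit MMIO accesses matching the target's push/pull width.
 */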
static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
			     unsigned long offset, unsigned int length)
{
	u64 __maybe_unused *wrptr64 = kernel_vaddr;
	const u64 __iomem __maybe_unused *rdptr64;
	struct nfp6000_area_priv *priv;
	u32 *wrptr32 = kernel_vaddr;
	const u32 __iomem *rdptr32;
	int n, width;
	bool is_64;

	priv = nfp_cpp_area_priv(area);
	rdptr64 = priv->iomem + offset;
	rdptr32 = priv->iomem + offset;

	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.read;

	if (width <= 0)
		return -EINVAL;

	/* Unaligned? Use an explicit transaction instead. */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
					     NFP_CPP_ID(priv->target,
							priv->action,
							priv->token),
					     priv->offset + offset,
					     kernel_vaddr, length, width);

	is_64 = width == TARGET_WIDTH_64;

	/* Memory Unit (MU) accesses via the RW action are done 32-bit */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW)
		is_64 = false;

	if (is_64) {
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;
	} else {
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;
	}

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	if (is_64)
#ifndef __raw_readq
		return -EINVAL;
#else
		for (n = 0; n < length; n += sizeof(u64))
			*wrptr64++ = __raw_readq(rdptr64++);
#endif
	else
		for (n = 0; n < length; n += sizeof(u32))
			*wrptr32++ = __raw_readl(rdptr32++);

	return n;
}

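/* Write to a CPP area through its BAR mapping; mirrors
 * nfp6000_area_read(), with a write memory barrier after each word.
 */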
static int
nfp6000_area_write(struct nfp_cpp_area *area,
		   const void *kernel_vaddr,
		   unsigned long offset, unsigned int length)
{
	const u64 __maybe_unused *rdptr64 = kernel_vaddr;
	u64 __iomem __maybe_unused *wrptr64;
	const u32 *rdptr32 = kernel_vaddr;
	struct nfp6000_area_priv *priv;
	u32 __iomem *wrptr32;
	int n, width;
	bool is_64;

	priv = nfp_cpp_area_priv(area);
	wrptr64 = priv->iomem + offset;
	wrptr32 = priv->iomem + offset;

	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.write;

	if (width <= 0)
		return -EINVAL;

	/* Unaligned? Use an explicit transaction instead. */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
					      NFP_CPP_ID(priv->target,
							 priv->action,
							 priv->token),
					      priv->offset + offset,
					      kernel_vaddr, length, width);

	is_64 = width == TARGET_WIDTH_64;

	/* Memory Unit (MU) accesses via the RW action are done 32-bit */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW)
		is_64 = false;

	if (is_64) {
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;
	} else {
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;
	}

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	if (is_64)
#ifndef __raw_writeq
		return -EINVAL;
#else
		for (n = 0; n < length; n += sizeof(u64)) {
			__raw_writeq(*rdptr64++, wrptr64++);
			wmb();
		}
#endif
	else
		for (n = 0; n < length; n += sizeof(u32)) {
			__raw_writel(*rdptr32++, wrptr32++);
			wmb();
		}

	return n;
}

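/*
 * Explicit CPP access interface.  Each explicit BAR group provides
 * four transaction slots; a slot consists of a 128-byte data area in
 * the PCIe SRAM and a quarter of the group's explicit BAR window.
 */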
struct nfp6000_explicit_priv {
	struct nfp6000_pcie *nfp;
	struct {
		int group;
		int area;
	} bar;
	int bitsize;
	void __iomem *data;
	void __iomem *addr;
};

static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl));
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	int i, j;

	mutex_lock(&nfp->expl.mutex);
	for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) {
		if (!nfp->expl.group[i].bitsize)
			continue;

		for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) {
			u16 data_offset;

			if (!nfp->expl.group[i].free[j])
				continue;

			priv->nfp = nfp;
			priv->bar.group = i;
			priv->bar.area = j;
			priv->bitsize = nfp->expl.group[i].bitsize - 2;

			data_offset = (priv->bar.group << 9) +
				(priv->bar.area << 7);
			priv->data = nfp->expl.data + data_offset;
			priv->addr = nfp->expl.group[i].addr +
				(priv->bar.area << priv->bitsize);
			nfp->expl.group[i].free[j] = false;

			mutex_unlock(&nfp->expl.mutex);
			return 0;
		}
	}
	mutex_unlock(&nfp->expl.mutex);

	return -EAGAIN;
}

static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	struct nfp6000_pcie *nfp = priv->nfp;

	mutex_lock(&nfp->expl.mutex);
	nfp->expl.group[priv->bar.group].free[priv->bar.area] = true;
	mutex_unlock(&nfp->expl.mutex);
}

static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl,
				const void *buff, size_t len)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	const u32 *src = buff;
	size_t i;

	for (i = 0; i < len; i += sizeof(u32))
		writel(*(src++), priv->data + i);

	return i;
}

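/* Program the three explicit BAR CSRs for this transaction slot and
 * kick the transaction off with a read of the slot's explicit BAR
 * window.  Returns the mask of signals the caller should wait for.
 */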
static int
nfp6000_explicit_do(struct nfp_cpp_explicit *expl,
		    const struct nfp_cpp_explicit_command *cmd, u64 address)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u8 signal_master, signal_ref, data_master;
	struct nfp6000_pcie *nfp = priv->nfp;
	int sigmask = 0;
	u16 data_ref;
	u32 csr[3];

	if (cmd->siga_mode)
		sigmask |= 1 << cmd->siga;
	if (cmd->sigb_mode)
		sigmask |= 1 << cmd->sigb;

	signal_master = cmd->signal_master;
	if (!signal_master)
		signal_master = nfp->expl.master_id;

	signal_ref = cmd->signal_ref;
	if (signal_master == nfp->expl.master_id)
		signal_ref = nfp->expl.signal_ref +
			((priv->bar.group * 4 + priv->bar.area) << 1);

	data_master = cmd->data_master;
	if (!data_master)
		data_master = nfp->expl.master_id;

	data_ref = cmd->data_ref;
	if (data_master == nfp->expl.master_id)
		data_ref = 0x1000 +
			(priv->bar.group << 9) + (priv->bar.area << 7);

	csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Token(
			NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16);

	csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref);

	csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target(
			NFP_CPP_ID_TARGET_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Action(
			NFP_CPP_ID_ACTION_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master);

	if (nfp->iomem.csr) {
		writel(csr[0], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						  priv->bar.area));
		writel(csr[1], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						  priv->bar.area));
		writel(csr[2], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						  priv->bar.area));
		/* Readback to ensure the CSR writes are flushed */
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						 priv->bar.area));
	} else {
		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR0(
					       priv->bar.group, priv->bar.area),
				       csr[0]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR1(
					       priv->bar.group, priv->bar.area),
				       csr[1]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR2(
					       priv->bar.group, priv->bar.area),
				       csr[2]);
	}

	/* Issue the 'kickoff' transaction */
	readb(priv->addr + (address & ((1 << priv->bitsize) - 1)));

	return sigmask;
}

static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl,
				void *buff, size_t len)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u32 *dst = buff;
	size_t i;

	for (i = 0; i < len; i += sizeof(u32))
		*(dst++) = readl(priv->data + i);

	return i;
}

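/* Pre-populate the CPP area cache with two 64K entries and one 256K entry. */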
static int nfp6000_init(struct nfp_cpp *cpp)
{
	nfp_cpp_area_cache_add(cpp, SZ_64K);
	nfp_cpp_area_cache_add(cpp, SZ_64K);
	nfp_cpp_area_cache_add(cpp, SZ_256K);

	return 0;
}

static void nfp6000_free(struct nfp_cpp *cpp)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(cpp);

	disable_bars(nfp);
	kfree(nfp);
}

/* The NFP serial number and CPP interface ID are read from the PCIe
 * Device Serial Number extended capability.
 */
static void nfp6000_read_serial(struct device *dev, u8 *serial)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int pos;
	u32 reg;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!pos) {
		memset(serial, 0, NFP_SERIAL_LEN);
		return;
	}

	pci_read_config_dword(pdev, pos + 4, &reg);
	put_unaligned_be16(reg >> 16, serial + 4);
	pci_read_config_dword(pdev, pos + 8, &reg);
	put_unaligned_be32(reg, serial);
}

static u16 nfp6000_get_interface(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int pos;
	u32 reg;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);

	pci_read_config_dword(pdev, pos + 4, &reg);

	return reg & 0xffff;
}

static const struct nfp_cpp_operations nfp6000_pcie_ops = {
	.owner = THIS_MODULE,

	.init = nfp6000_init,
	.free = nfp6000_free,

	.read_serial = nfp6000_read_serial,
	.get_interface = nfp6000_get_interface,

	.area_priv_size = sizeof(struct nfp6000_area_priv),
	.area_init = nfp6000_area_init,
	.area_cleanup = nfp6000_area_cleanup,
	.area_acquire = nfp6000_area_acquire,
	.area_release = nfp6000_area_release,
	.area_phys = nfp6000_area_phys,
	.area_iomem = nfp6000_area_iomem,
	.area_resource = nfp6000_area_resource,
	.area_read = nfp6000_area_read,
	.area_write = nfp6000_area_write,

	.explicit_priv_size = sizeof(struct nfp6000_explicit_priv),
	.explicit_acquire = nfp6000_explicit_acquire,
	.explicit_release = nfp6000_explicit_release,
	.explicit_put = nfp6000_explicit_put,
	.explicit_do = nfp6000_explicit_do,
	.explicit_get = nfp6000_explicit_get,
};

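/**
 * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
 * @pdev:	NFP6000 PCI device
 *
 * Return: NFP CPP handle
 */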
struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
{
	struct nfp6000_pcie *nfp;
	u16 interface;
	int err;

	dev_info(&pdev->dev,
		 "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp) {
		err = -ENOMEM;
		goto err_ret;
	}

	nfp->dev = &pdev->dev;
	nfp->pdev = pdev;
	init_waitqueue_head(&nfp->bar_waiters);
	spin_lock_init(&nfp->bar_lock);

	interface = nfp6000_get_interface(&pdev->dev);

	if (NFP_CPP_INTERFACE_TYPE_of(interface) !=
	    NFP_CPP_INTERFACE_TYPE_PCI) {
		dev_err(&pdev->dev,
			"Interface type %d is not the expected %d\n",
			NFP_CPP_INTERFACE_TYPE_of(interface),
			NFP_CPP_INTERFACE_TYPE_PCI);
		err = -ENODEV;
		goto err_free_nfp;
	}

	if (NFP_CPP_INTERFACE_CHANNEL_of(interface) !=
	    NFP_CPP_INTERFACE_CHANNEL_PEROPENER) {
		dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n",
			NFP_CPP_INTERFACE_CHANNEL_of(interface),
			NFP_CPP_INTERFACE_CHANNEL_PEROPENER);
		err = -ENODEV;
		goto err_free_nfp;
	}

	err = enable_bars(nfp, interface);
	if (err)
		goto err_free_nfp;

	return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp);

err_free_nfp:
	kfree(nfp);
err_ret:
	dev_err(&pdev->dev, "NFP6000 PCI setup failed\n");
	return ERR_PTR(err);
}