1
2
3
4
5
6
7
8
9
10#include <linux/pci_regs.h>
11#include <linux/pci_ids.h>
12#include <linux/device.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/sort.h>
17#include <linux/pci.h>
18#include <linux/of.h>
19#include <linux/delay.h>
20#include <asm/opal.h>
21#include <asm/msi_bitmap.h>
22#include <asm/pnv-pci.h>
23#include <asm/io.h>
24#include <asm/reg.h>
25
26#include "cxl.h"
27#include <misc/cxl.h>
28
29
30#define CXL_PCI_VSEC_ID 0x1280
31#define CXL_VSEC_MIN_SIZE 0x80
32
/*
 * Read the CXL VSEC length in bytes. The 12-bit length field occupies the
 * top of the dword at vsec + 0x4; reading the word at +0x6 picks up bits
 * 31:16, so shift out the low nibble (which belongs to the VSEC revision).
 *
 * Wrapped in do { } while (0) so the multi-statement macro expands to a
 * single statement and stays safe in unbraced if/else bodies.
 */
#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
	do {							\
		pci_read_config_word(dev, vsec + 0x6, dest);	\
		*dest >>= 4;					\
	} while (0)
38#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
39 pci_read_config_byte(dev, vsec + 0x8, dest)
40
41#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
42 pci_read_config_byte(dev, vsec + 0x9, dest)
43#define CXL_STATUS_SECOND_PORT 0x80
44#define CXL_STATUS_MSI_X_FULL 0x40
45#define CXL_STATUS_MSI_X_SINGLE 0x20
46#define CXL_STATUS_FLASH_RW 0x08
47#define CXL_STATUS_FLASH_RO 0x04
48#define CXL_STATUS_LOADABLE_AFU 0x02
49#define CXL_STATUS_LOADABLE_PSL 0x01
50
51#define CXL_UNSUPPORTED_FEATURES \
52 (CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)
53
54#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
55 pci_read_config_byte(dev, vsec + 0xa, dest)
56#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
57 pci_write_config_byte(dev, vsec + 0xa, val)
58#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \
59 pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val)
60#define CXL_VSEC_PROTOCOL_MASK 0xe0
61#define CXL_VSEC_PROTOCOL_1024TB 0x80
62#define CXL_VSEC_PROTOCOL_512TB 0x40
63#define CXL_VSEC_PROTOCOL_256TB 0x20
64#define CXL_VSEC_PROTOCOL_ENABLE 0x01
65
66#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
67 pci_read_config_word(dev, vsec + 0xc, dest)
68#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
69 pci_read_config_byte(dev, vsec + 0xe, dest)
70#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
71 pci_read_config_byte(dev, vsec + 0xf, dest)
72#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
73 pci_read_config_word(dev, vsec + 0x10, dest)
74
75#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
76 pci_read_config_byte(dev, vsec + 0x13, dest)
77#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
78 pci_write_config_byte(dev, vsec + 0x13, val)
79#define CXL_VSEC_USER_IMAGE_LOADED 0x80
80#define CXL_VSEC_PERST_LOADS_IMAGE 0x20
81#define CXL_VSEC_PERST_SELECT_USER 0x10
82
83#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
84 pci_read_config_dword(dev, vsec + 0x20, dest)
85#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
86 pci_read_config_dword(dev, vsec + 0x24, dest)
87#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
88 pci_read_config_dword(dev, vsec + 0x28, dest)
89#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
90 pci_read_config_dword(dev, vsec + 0x2c, dest)
91
92
93
94
95#define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off)
96#define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off)
97#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit)))
98#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
99
100#define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0)
101#define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15)
102#define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31)
103#define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47)
104#define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48)
105#define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55)
106#define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59)
107#define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61)
108#define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63)
109#define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20)
110#define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
111#define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28)
112#define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30)
113#define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6)
114#define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7)
115#define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
116#define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38)
117#define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40)
118#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
119#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
120
/*
 * PCI IDs of known IBM CXL (CAPI) adapters, plus a catch-all match on
 * PCI class code 0x120000 (mask ~0 requires an exact class match).
 */
static const struct pci_device_id cxl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), },
	{ PCI_DEVICE_CLASS(0x120000, ~0), },

	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
133
134
135
136
137
138
/* Physical start of BAR 2, which carries the CXL P1 register area. */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}
143
/* Length of BAR 2 (the CXL P1 register area). */
static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}
148
/* Physical start of BAR 0, which carries the CXL P2 register area. */
static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}
153
/* Length of BAR 0 (the CXL P2 register area). */
static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}
158
159static int find_cxl_vsec(struct pci_dev *dev)
160{
161 int vsec = 0;
162 u16 val;
163
164 while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
165 pci_read_config_word(dev, vsec + 0x4, &val);
166 if (val == CXL_PCI_VSEC_ID)
167 return vsec;
168 }
169 return 0;
170
171}
172
173static void dump_cxl_config_space(struct pci_dev *dev)
174{
175 int vsec;
176 u32 val;
177
178 dev_info(&dev->dev, "dump_cxl_config_space\n");
179
180 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
181 dev_info(&dev->dev, "BAR0: %#.8x\n", val);
182 pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
183 dev_info(&dev->dev, "BAR1: %#.8x\n", val);
184 pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
185 dev_info(&dev->dev, "BAR2: %#.8x\n", val);
186 pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
187 dev_info(&dev->dev, "BAR3: %#.8x\n", val);
188 pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
189 dev_info(&dev->dev, "BAR4: %#.8x\n", val);
190 pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
191 dev_info(&dev->dev, "BAR5: %#.8x\n", val);
192
193 dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
194 p1_base(dev), p1_size(dev));
195 dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
196 p2_base(dev), p2_size(dev));
197 dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
198 pci_resource_start(dev, 4), pci_resource_len(dev, 4));
199
200 if (!(vsec = find_cxl_vsec(dev)))
201 return;
202
203#define show_reg(name, what) \
204 dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)
205
206 pci_read_config_dword(dev, vsec + 0x0, &val);
207 show_reg("Cap ID", (val >> 0) & 0xffff);
208 show_reg("Cap Ver", (val >> 16) & 0xf);
209 show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
210 pci_read_config_dword(dev, vsec + 0x4, &val);
211 show_reg("VSEC ID", (val >> 0) & 0xffff);
212 show_reg("VSEC Rev", (val >> 16) & 0xf);
213 show_reg("VSEC Length", (val >> 20) & 0xfff);
214 pci_read_config_dword(dev, vsec + 0x8, &val);
215 show_reg("Num AFUs", (val >> 0) & 0xff);
216 show_reg("Status", (val >> 8) & 0xff);
217 show_reg("Mode Control", (val >> 16) & 0xff);
218 show_reg("Reserved", (val >> 24) & 0xff);
219 pci_read_config_dword(dev, vsec + 0xc, &val);
220 show_reg("PSL Rev", (val >> 0) & 0xffff);
221 show_reg("CAIA Ver", (val >> 16) & 0xffff);
222 pci_read_config_dword(dev, vsec + 0x10, &val);
223 show_reg("Base Image Rev", (val >> 0) & 0xffff);
224 show_reg("Reserved", (val >> 16) & 0x0fff);
225 show_reg("Image Control", (val >> 28) & 0x3);
226 show_reg("Reserved", (val >> 30) & 0x1);
227 show_reg("Image Loaded", (val >> 31) & 0x1);
228
229 pci_read_config_dword(dev, vsec + 0x14, &val);
230 show_reg("Reserved", val);
231 pci_read_config_dword(dev, vsec + 0x18, &val);
232 show_reg("Reserved", val);
233 pci_read_config_dword(dev, vsec + 0x1c, &val);
234 show_reg("Reserved", val);
235
236 pci_read_config_dword(dev, vsec + 0x20, &val);
237 show_reg("AFU Descriptor Offset", val);
238 pci_read_config_dword(dev, vsec + 0x24, &val);
239 show_reg("AFU Descriptor Size", val);
240 pci_read_config_dword(dev, vsec + 0x28, &val);
241 show_reg("Problem State Offset", val);
242 pci_read_config_dword(dev, vsec + 0x2c, &val);
243 show_reg("Problem State Size", val);
244
245 pci_read_config_dword(dev, vsec + 0x30, &val);
246 show_reg("Reserved", val);
247 pci_read_config_dword(dev, vsec + 0x34, &val);
248 show_reg("Reserved", val);
249 pci_read_config_dword(dev, vsec + 0x38, &val);
250 show_reg("Reserved", val);
251 pci_read_config_dword(dev, vsec + 0x3c, &val);
252 show_reg("Reserved", val);
253
254 pci_read_config_dword(dev, vsec + 0x40, &val);
255 show_reg("PSL Programming Port", val);
256 pci_read_config_dword(dev, vsec + 0x44, &val);
257 show_reg("PSL Programming Control", val);
258
259 pci_read_config_dword(dev, vsec + 0x48, &val);
260 show_reg("Reserved", val);
261 pci_read_config_dword(dev, vsec + 0x4c, &val);
262 show_reg("Reserved", val);
263
264 pci_read_config_dword(dev, vsec + 0x50, &val);
265 show_reg("Flash Address Register", val);
266 pci_read_config_dword(dev, vsec + 0x54, &val);
267 show_reg("Flash Size Register", val);
268 pci_read_config_dword(dev, vsec + 0x58, &val);
269 show_reg("Flash Status/Control Register", val);
270 pci_read_config_dword(dev, vsec + 0x58, &val);
271 show_reg("Flash Data Port", val);
272
273#undef show_reg
274}
275
/*
 * Debug helper: decode and log every field of the AFU descriptor MMIO
 * block (read via AFUD_READ*), including one Vendor/Device line per AFU
 * configuration record.
 */
static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);	/* remembered for the CR loop below */

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;	/* CR length is in 256-byte units */

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

	/* Configuration records are little-endian, hence AFUD_READ_LE. */
	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}
328
329#define P8_CAPP_UNIT0_ID 0xBA
330#define P8_CAPP_UNIT1_ID 0XBE
331#define P9_CAPP_UNIT0_ID 0xC0
332#define P9_CAPP_UNIT1_ID 0xE0
333
334static int get_phb_index(struct device_node *np, u32 *phb_index)
335{
336 if (of_property_read_u32(np, "ibm,phb-index", phb_index))
337 return -ENODEV;
338 return 0;
339}
340
/*
 * Map a PHB index to the CAPP unit ID used for CAPI routing.
 *
 * Returns 0 for any chip/PHB combination that has no CAPP unit attached
 * (callers treat 0 as invalid). @np is currently unused; it is kept so
 * the device-tree node is available should the mapping ever need it.
 */
static u64 get_capp_unit_id(struct device_node *np, u32 phb_index)
{
	/*
	 * POWER8: non-NVL chips route everything through CAPP unit 0;
	 * POWER8NVL exposes two units — PHB0 -> unit 0, PHB1 -> unit 1.
	 */
	if (cxl_is_power8()) {
		if (!pvr_version_is(PVR_POWER8NVL))
			return P8_CAPP_UNIT0_ID;

		if (phb_index == 0)
			return P8_CAPP_UNIT0_ID;

		if (phb_index == 1)
			return P8_CAPP_UNIT1_ID;
	}

	/*
	 * POWER9: only PHB0 (CAPP0) and PHB3 (CAPP1) are CAPI-capable.
	 */
	if (cxl_is_power9()) {
		if (phb_index == 0)
			return P9_CAPP_UNIT0_ID;

		if (phb_index == 3)
			return P9_CAPP_UNIT1_ID;
	}

	return 0;	/* no CAPP unit for this chip/PHB */
}
377
378int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
379 u32 *phb_index, u64 *capp_unit_id)
380{
381 int rc;
382 struct device_node *np;
383 const __be32 *prop;
384
385 if (!(np = pnv_pci_get_phb_node(dev)))
386 return -ENODEV;
387
388 while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
389 np = of_get_next_parent(np);
390 if (!np)
391 return -ENODEV;
392
393 *chipid = be32_to_cpup(prop);
394
395 rc = get_phb_index(np, phb_index);
396 if (rc) {
397 pr_err("cxl: invalid phb index\n");
398 return rc;
399 }
400
401 *capp_unit_id = get_capp_unit_id(np, *phb_index);
402 of_node_put(np);
403 if (!*capp_unit_id) {
404 pr_err("cxl: invalid capp unit id (phb_index: %d)\n",
405 *phb_index);
406 return -ENODEV;
407 }
408
409 return 0;
410}
411
/*
 * Build the value for the POWER9 XSL_DSNCTL register, which tells the XSL
 * where to route CAPI traffic (the CAPP unit) and configures associated
 * identifiers. The result is returned through *reg; the function itself
 * always succeeds.
 *
 * NOTE(review): the field meanings below are taken from the hardware init
 * sequence; verify bit positions against the POWER9 XSL workbook.
 */
int cxl_get_xsl9_dsnctl(u64 capp_unit_id, u64 *reg)
{
	u64 xsl_dsnctl;

	/* CAPI protocol identifier in the top byte, then the CAPP unit ID
	 * that the XSL should route data to. */
	xsl_dsnctl = ((u64)0x2 << (63-7));
	xsl_dsnctl |= (capp_unit_id << (63-15));

	/* nMMU_ID — defaults to b'000001001' */
	xsl_dsnctl |= ((u64)0x09 << (63-28));

	if (!(cxl_is_power9_dd1())) {
		/*
		 * Identifies CAPI packets to be sorted into the PHB's
		 * non-blocking queues; not supported on P9 DD1.
		 */
		xsl_dsnctl |= ((u64)0x03 << (63-47));

		/*
		 * Upper address bits for ASB_Notify messages targeting
		 * system memory; not supported on P9 DD1.
		 */
		xsl_dsnctl |= ((u64)0x04 << (63-55));
	}

	*reg = xsl_dsnctl;
	return 0;
}
453
/*
 * One-time PSL9 (POWER9) adapter register setup: program CAPP routing in
 * XSL_DSNCTL, FIR controls, and several implementation registers with the
 * hardware-recommended values.
 *
 * Returns 0 on success or the error from the CAPP routing calculation.
 * NOTE(review): the magic constants are hardware-recommended settings;
 * their field meanings cannot be derived from this file alone.
 */
static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
						 struct pci_dev *dev)
{
	u64 xsl_dsnctl, psl_fircntl;
	u64 chipid;
	u32 phb_index;
	u64 capp_unit_id;
	int rc;

	rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
	if (rc)
		return rc;

	rc = cxl_get_xsl9_dsnctl(capp_unit_id, &xsl_dsnctl);
	if (rc)
		return rc;

	cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl);

	/* FIR control: recommended value for production environments */
	psl_fircntl = (0x2ULL << (63-3));	/* ce_report */
	psl_fircntl |= (0x1ULL << (63-6));	/* FIR_report */
	psl_fircntl |= 0x1ULL;			/* ce_thresh */
	cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);

	/* Let the PSL transmit packets on PCIe before the CAPP is enabled */
	cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000002A10ULL);

	/* XSL defaults (recommended value) */
	cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);

	/* XSL invalidation settings (recommended value) */
	cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);

	if (phb_index == 3) {
		/* PHB3-specific APC dedicated-machine typing */
		cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
	}

	/* APC dedicated-machine allocation (snoop machines) */
	cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);

	if (cxl_is_power9_dd1()) {
		/* DD1 workaround via the general-purpose control register */
		cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0020000000000001ULL);
	} else
		cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x4000000000000000ULL);

	return 0;
}
513
/*
 * One-time PSL8 (POWER8) adapter register setup: route CAPI data to the
 * right chip/CAPP unit and program FIR, snoop and trace registers with
 * hardware-recommended values.
 *
 * Returns 0 on success or the error from the CAPP routing calculation.
 */
static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_dsnctl, psl_fircntl;
	u64 chipid;
	u32 phb_index;
	u64 capp_unit_id;
	int rc;

	rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
	if (rc)
		return rc;

	psl_dsnctl = 0x0000900000000000ULL;	/* base recommended setting */
	psl_dsnctl |= (0x2ULL << (63-38));	/* MMIO hang pulse */

	/* Tell the PSL where to route data to */
	psl_dsnctl |= (chipid << (63-5));
	psl_dsnctl |= (capp_unit_id << (63-13));

	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* Snoop write allocation mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* FIR control: recommended value for production environments */
	psl_fircntl = (0x2ULL << (63-3));	/* ce_report */
	psl_fircntl |= (0x1ULL << (63-6));	/* FIR_report */
	psl_fircntl |= 0x1ULL;			/* ce_thresh */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
	/* Trace array setup, for debugging */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}
546
547static int init_implementation_adapter_regs_xsl(struct cxl *adapter, struct pci_dev *dev)
548{
549 u64 xsl_dsnctl;
550 u64 chipid;
551 u32 phb_index;
552 u64 capp_unit_id;
553 int rc;
554
555 rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
556 if (rc)
557 return rc;
558
559
560 xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5));
561 xsl_dsnctl |= (capp_unit_id << (63-13));
562 cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl);
563
564 return 0;
565}
566
567
568#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
569#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
570
571#define PSL_2048_250MHZ_CYCLES 1
572
/*
 * Program the PSL9 timebase control/status register with the recommended
 * timebase sync count (2 * 2048 cycles at 250MHz).
 */
static void write_timebase_ctrl_psl9(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL9_TB_CTLSTAT,
		     TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
}
578
/*
 * Program the PSL8 timebase control/status register with the recommended
 * timebase sync count (2 * 2048 cycles at 250MHz).
 */
static void write_timebase_ctrl_psl8(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
		     TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
}
584
585
586#define TBSYNC_ENA (1ULL << 63)
587
588#define XSL_2000_CLOCKS 1
589#define XSL_4000_CLOCKS 2
590#define XSL_8000_CLOCKS 3
591
/*
 * Program the XSL timebase control/status register: enable timebase sync
 * with calibration value 3 and a 4000-clock sync count.
 */
static void write_timebase_ctrl_xsl(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT,
		     TBSYNC_ENA |
		     TBSYNC_CAL(3) |
		     TBSYNC_CNT(XSL_4000_CLOCKS));
}
599
/* Read the current PSL9 timebase value. */
static u64 timebase_read_psl9(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_PSL9_Timebase);
}
604
/* Read the current PSL8 timebase value. */
static u64 timebase_read_psl8(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_PSL_Timebase);
}
609
/* Read the current XSL timebase value. */
static u64 timebase_read_xsl(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_XSL_Timebase);
}
614
/*
 * Synchronise the PSL timebase with the core timebase. Best-effort: on
 * any failure (no OPAL support, sync doesn't converge) the function just
 * leaves adapter->psl_timebase_synced false and returns.
 *
 * NOTE(review): pnv_pci_get_phb_node() presumably returns @np with a
 * reference held that is never dropped on the success path — verify
 * against its implementation.
 */
static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_tb;
	int delta;
	unsigned int retry = 0;
	struct device_node *np;

	adapter->psl_timebase_synced = false;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return;

	/* Earlier firmware may not support CAPP timebase sync at all */
	of_node_get(np);
	if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
		of_node_put(np);
		dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
		return;
	}
	of_node_put(np);

	/*
	 * Program the timebase sync count via the per-PSL-variant hook,
	 * then pulse the timebase enable bit in PSL_Control.
	 */
	adapter->native->sl_ops->write_timebase_ctrl(adapter);

	/* Enable the PSL timebase */
	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);

	/* Wait (up to ~6ms) until core and PSL timebases differ by <= 16us */
	do {
		msleep(1);
		if (retry++ > 5) {
			dev_info(&dev->dev, "PSL timebase can't synchronize\n");
			return;
		}
		psl_tb = adapter->native->sl_ops->timebase_read(adapter);
		delta = mftb() - psl_tb;
		if (delta < 0)
			delta = -delta;
	} while (tb_to_ns(delta) > 16000);

	adapter->psl_timebase_synced = true;
	return;
}
662
/* PSL9 needs no per-AFU implementation-specific register setup. */
static int init_implementation_afu_regs_psl9(struct cxl_afu *afu)
{
	return 0;
}
667
/*
 * Per-AFU PSL8 register setup: APC allocation, coherency allocation,
 * slice trace and RX control, all programmed with hardware-recommended
 * values. Always returns 0.
 */
static int init_implementation_afu_regs_psl8(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}
680
681int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
682 unsigned int virq)
683{
684 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
685
686 return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
687}
688
689int cxl_update_image_control(struct cxl *adapter)
690{
691 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
692 int rc;
693 int vsec;
694 u8 image_state;
695
696 if (!(vsec = find_cxl_vsec(dev))) {
697 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
698 return -ENODEV;
699 }
700
701 if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
702 dev_err(&dev->dev, "failed to read image state: %i\n", rc);
703 return rc;
704 }
705
706 if (adapter->perst_loads_image)
707 image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
708 else
709 image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
710
711 if (adapter->perst_select_user)
712 image_state |= CXL_VSEC_PERST_SELECT_USER;
713 else
714 image_state &= ~CXL_VSEC_PERST_SELECT_USER;
715
716 if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
717 dev_err(&dev->dev, "failed to update image control: %i\n", rc);
718 return rc;
719 }
720
721 return 0;
722}
723
724int cxl_pci_alloc_one_irq(struct cxl *adapter)
725{
726 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
727
728 return pnv_cxl_alloc_hwirqs(dev, 1);
729}
730
731void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
732{
733 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
734
735 return pnv_cxl_release_hwirqs(dev, hwirq, 1);
736}
737
738int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
739 struct cxl *adapter, unsigned int num)
740{
741 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
742
743 return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
744}
745
746void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
747 struct cxl *adapter)
748{
749 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
750
751 pnv_cxl_release_hwirq_ranges(irqs, dev);
752}
753
/*
 * Sanity-check and program the BARs for CAPI operation.
 *
 * Returns 0 on success, -ENODEV if the P1/P2 BARs were assigned below
 * 4GB (32-bit M32 window), which is incompatible with CXL.
 */
static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check: CXL BARs must live in the 64-bit MMIO window */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed
	 * with a value corresponding to the CXL protocol address range:
	 * the 0x00020000 upper dword sets address bit 49, i.e. bits
	 * 48:49 = 0b10.
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}
773
774#ifdef CONFIG_CXL_BIMODAL
775
/* Deferred-work context for switching a bi-modal card into CXL mode. */
struct cxl_switch_work {
	struct pci_dev *dev;		/* card being switched; ref held until the work runs */
	struct work_struct work;	/* runs switch_card_to_cxl() */
	int vsec;			/* config-space offset of the CXL VSEC */
	int mode;			/* requested mode (CXL_BIMODE_*) */
};
782
/*
 * Workqueue handler that flips a bi-modal card from PCI to CXL mode:
 * remove the devices below the root port, rewrite the VSEC mode control
 * via the bus (the struct pci_dev is gone by then), reset the card, then
 * power-cycle and reprobe the slot through OPAL.
 *
 * Owns the reference taken on @dev by cxl_check_and_switch_mode() and
 * the switch_work allocation; both are released on every path.
 */
static void switch_card_to_cxl(struct work_struct *work)
{
	struct cxl_switch_work *switch_work =
		container_of(work, struct cxl_switch_work, work);
	struct pci_dev *dev = switch_work->dev;
	struct pci_bus *bus = dev->bus;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *bridge;
	struct pnv_php_slot *php_slot;
	unsigned int devfn;
	u8 val;
	int rc;

	dev_info(&bus->dev, "cxl: Preparing for mode switch...\n");
	/* The root port is the first device on the hose's root bus */
	bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev,
					  bus_list);
	if (!bridge) {
		dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n");
		goto err_dev_put;
	}

	php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge));
	if (!php_slot) {
		dev_err(&bus->dev, "cxl: Failed to find slot hotplug "
			           "information. You may need to upgrade "
			           "skiboot. Aborting.\n");
		goto err_dev_put;
	}

	/* Snapshot the current mode control before the device goes away */
	rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val);
	if (rc) {
		dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc);
		goto err_dev_put;
	}
	devfn = dev->devfn;

	/* The device struct is invalid once we remove the bus below */
	pci_dev_put(dev);

	dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n");
	pci_lock_rescan_remove();
	pci_hp_remove_devices(bridge->subordinate);
	pci_unlock_rescan_remove();

	/* Switch the CXL protocol on the card */
	if (switch_work->mode == CXL_BIMODE_CXL) {
		dev_info(&bus->dev, "cxl: Switching card to CXL mode\n");
		val &= ~CXL_VSEC_PROTOCOL_MASK;
		val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
		rc = pnv_cxl_enable_phb_kernel_api(hose, true);
		if (rc) {
			dev_err(&bus->dev, "cxl: Failed to enable kernel API"
				           " on real PHB, aborting\n");
			goto err_free_work;
		}
	} else {
		dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n");
		goto err_free_work;
	}

	/* Write via bus/devfn — the struct pci_dev has been removed */
	rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val);
	if (rc) {
		dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc);
		goto err_free_work;
	}

	/* Give the card time to apply the configuration write */
	msleep(100);

	/*
	 * Hot reset so the card comes back in the new mode, then
	 * power-cycle and reprobe the slot through OPAL.
	 */
	dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n");
	pci_set_pcie_reset_state(bridge, pcie_hot_reset);
	pci_set_pcie_reset_state(bridge, pcie_deassert_reset);

	dev_dbg(&bus->dev, "cxl: Offlining slot\n");
	rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE);
	if (rc) {
		dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc);
		goto err_free_work;
	}

	dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n");
	rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE);
	if (rc) {
		dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc);
		goto err_free_work;
	}

	pci_lock_rescan_remove();
	pci_hp_add_devices(bridge->subordinate);
	pci_unlock_rescan_remove();

	dev_info(&bus->dev, "cxl: CAPI mode switch completed\n");
	kfree(switch_work);
	return;

err_dev_put:
	/* Drop the reference taken by cxl_check_and_switch_mode() */
	pci_dev_put(dev);
err_free_work:
	kfree(switch_work);
}
898
/*
 * Check a bi-modal card's current mode and, if needed, schedule a switch
 * to the requested @mode. @vsec may be 0, in which case the CXL VSEC is
 * located here.
 *
 * Returns 0 if the card is already in the requested mode, -EBUSY if a
 * switch to CXL mode has been scheduled (the switch happens
 * asynchronously in a workqueue — the caller cannot use the device yet),
 * -EIO for the unsupported CXL->PCI direction, or a negative errno on
 * other failures.
 */
int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec)
{
	struct cxl_switch_work *work;
	u8 val;
	int rc;

	/* Mode switching requires hypervisor (bare-metal) mode */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -ENODEV;

	if (!vsec) {
		vsec = find_cxl_vsec(dev);
		if (!vsec) {
			dev_info(&dev->dev, "CXL VSEC not found\n");
			return -ENODEV;
		}
	}

	rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
	if (rc) {
		dev_err(&dev->dev, "Failed to read current mode control: %i", rc);
		return rc;
	}

	if (mode == CXL_BIMODE_PCI) {
		if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
			dev_info(&dev->dev, "Card is already in PCI mode\n");
			return 0;
		}
		/*
		 * Switching a card out of CXL mode is not implemented
		 * (see switch_card_to_cxl(), which only handles the
		 * PCI -> CXL direction).
		 */
		dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n");
		return -EIO;
	}

	if (val & CXL_VSEC_PROTOCOL_ENABLE) {
		dev_info(&dev->dev, "Card is already in CXL mode\n");
		return 0;
	}

	dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread "
			    "to switch to CXL mode\n");

	work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	/* Reference is dropped by switch_card_to_cxl() */
	pci_dev_get(dev);
	work->dev = dev;
	work->vsec = vsec;
	work->mode = mode;
	INIT_WORK(&work->work, switch_card_to_cxl);

	schedule_work(&work->work);

	/*
	 * The switch (device removal, VSEC rewrite, reset, slot
	 * power-cycle and reprobe) happens asynchronously in the
	 * workqueue, so report busy: the device is unusable until the
	 * switch completes and the card is reprobed.
	 */
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode);
967EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode);
968
969#endif
970
971static int setup_cxl_protocol_area(struct pci_dev *dev)
972{
973 u8 val;
974 int rc;
975 int vsec = find_cxl_vsec(dev);
976
977 if (!vsec) {
978 dev_info(&dev->dev, "CXL VSEC not found\n");
979 return -ENODEV;
980 }
981
982 rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
983 if (rc) {
984 dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc);
985 return rc;
986 }
987
988 if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
989 dev_err(&dev->dev, "Card not in CAPI mode!\n");
990 return -EIO;
991 }
992
993 if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) {
994 val &= ~CXL_VSEC_PROTOCOL_MASK;
995 val |= CXL_VSEC_PROTOCOL_256TB;
996 rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val);
997 if (rc) {
998 dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc);
999 return rc;
1000 }
1001 }
1002
1003 return 0;
1004}
1005
1006static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
1007{
1008 u64 p1n_base, p2n_base, afu_desc;
1009 const u64 p1n_size = 0x100;
1010 const u64 p2n_size = 0x1000;
1011
1012 p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
1013 p2n_base = p2_base(dev) + (afu->slice * p2n_size);
1014 afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
1015 afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);
1016
1017 if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
1018 goto err;
1019 if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
1020 goto err1;
1021 if (afu_desc) {
1022 if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
1023 goto err2;
1024 }
1025
1026 return 0;
1027err2:
1028 iounmap(afu->p2n_mmio);
1029err1:
1030 iounmap(afu->native->p1n_mmio);
1031err:
1032 dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
1033 return -ENOMEM;
1034}
1035
1036static void pci_unmap_slice_regs(struct cxl_afu *afu)
1037{
1038 if (afu->p2n_mmio) {
1039 iounmap(afu->p2n_mmio);
1040 afu->p2n_mmio = NULL;
1041 }
1042 if (afu->native->p1n_mmio) {
1043 iounmap(afu->native->p1n_mmio);
1044 afu->native->p1n_mmio = NULL;
1045 }
1046 if (afu->native->afu_desc_mmio) {
1047 iounmap(afu->native->afu_desc_mmio);
1048 afu->native->afu_desc_mmio = NULL;
1049 }
1050}
1051
/*
 * Device release callback for an AFU: destroy the context IDR, release
 * the scheduled process area and free the AFU structures. Called when
 * the last reference to afu->dev is dropped.
 */
void cxl_pci_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);
	cxl_release_spa(afu);

	kfree(afu->native);
	kfree(afu);
}
1064
1065
/*
 * Parse the AFU descriptor MMIO block into the cxl_afu fields: per-process
 * interrupt count, process limits, supported programming modes, PSA/CR/EB
 * geometry. Always returns 0; a bogus error-buffer offset only disables
 * the error buffer (eb_len = 0) rather than failing.
 */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;	/* length is in 4K pages */
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;	/* CR length is in 256-byte units */
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* AFU error buffer: length is in multiples of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* The offset must be 4K aligned, so the low 12 bits must be zero */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* Disable the error buffer rather than failing the AFU */
		afu->eb_len = 0;
	}

	return 0;
}
1110
/*
 * Sanity-check the parsed AFU descriptor: the per-process PSA must fit
 * inside the adapter's PSA, every configuration record must start with a
 * non-zero dword, and any process-based mode must advertise at least one
 * process.
 *
 * Returns 0 if everything looks consistent, -ENODEV/-EINVAL otherwise.
 */
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i, rc;
	u32 val;

	if (afu->psa && afu->adapter->ps_size <
			(afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size);

	/* Each CR's first dword must be readable and non-zero */
	for (i = 0; i < afu->crs_num; i++) {
		rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
		if (rc || val == 0) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
		/*
		 * For any mode other than dedicated-process, a maximum
		 * process count of 0 is clearly an error: it would leave
		 * the limit unenforced (idr_alloc treats 0 as "no
		 * maximum"). Dedicated-process AFUs are exempt to avoid
		 * breaking existing AFUs that left the field as 0.
		 */
		dev_err(&afu->dev, "AFU does not support any processes\n");
		return -EINVAL;
	}

	return 0;
}
1150
/*
 * Bring a PSL9 AFU slice into a known-clean state before use: disable it
 * if the firmware/previous owner left it enabled, clear registers holding
 * addresses, and acknowledge any pending faults or errors.
 *
 * Returns 0 on success, -EIO if the AFU cannot be reset/disabled/purged.
 */
static int sanitise_afu_regs_psl9(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address, or
	 * that may be waiting on an acknowledgement, to be a bit safer
	 * as we bring the slice online.
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (cxl_ops->afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		/* Ack translation faults as errors, anything else as done */
		if (reg & CXL_PSL9_DSISR_An_TF)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	if (afu->adapter->native->sl_ops->register_serr_irq) {
		reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		if (reg) {
			if (reg & ~0x000000007fffffff)
				dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
			cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
		}
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}
1196
/*
 * PSL8 variant of AFU slice sanitisation: same flow as the PSL9 version,
 * but PSL8 has additional per-slice registers (IVTEs, scheduled process
 * area, HAURP/CSRP/AURP/SSTP) that must also be cleared.
 *
 * Returns 0 on success, -EIO if the slice cannot be reset/disabled/purged.
 */
static int sanitise_afu_regs_psl8(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * If the AFU is enabled, force it through reset/disable/purge so we
	 * are not racing a live AFU while clearing registers below.
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (cxl_ops->afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}
	/* Clear anything holding an address, IVTE or process-area pointer */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
	/* Acknowledge any pending fault so DSISR is clean before use */
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	if (afu->adapter->native->sl_ops->register_serr_irq) {
		reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		if (reg) {
			/* Low 16 bits look like config/mask state: only warn
			 * on higher bits, and keep the low bits when clearing */
			if (reg & ~0xffff)
				dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
			cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
		}
	}
	/* Error status is write-one-to-clear: write back what we read */
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}
1251
/* Upper bound on one copy; also the size of the bounce buffer (one page) */
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE

/*
 * cxl_pci_afu_read_err_buffer - copy a slice of the AFU error buffer
 * @afu:   AFU whose error buffer (in AFU-descriptor MMIO space) to read
 * @buf:   destination kernel buffer
 * @off:   byte offset into the error buffer
 * @count: number of bytes requested
 *
 * MMIO reads are done on an 8-byte aligned window around the requested
 * range (presumably an alignment requirement of the hardware — confirm),
 * through a one-page bounce buffer; the requested bytes are then copied
 * out of the bounce buffer.
 *
 * Returns the number of bytes copied (may be less than @count: clamped to
 * the end of the buffer and to ERR_BUFF_MAX_COPY_SIZE), 0 for an empty or
 * out-of-range request, or -ENOMEM if the bounce page can't be allocated.
 */
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* Clamp to the end of the buffer, then widen to 8-byte alignment */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* The aligned window must fit in the single bounce page */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* Bounce page for the aligned MMIO read */
	tbuf = (void *)__get_free_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	/* Aligned read from MMIO, then extract the requested slice */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}
1295
/*
 * Bring one AFU slice up: map its MMIO regions, sanitise and reset it,
 * read and validate its descriptor, and register its interrupts.
 * Acquired resources are unwound through the goto chain on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	if ((rc = pci_map_slice_regs(afu, adapter, dev)))
		return rc;

	/* PSL-version-specific slice quiesce/cleanup (may be absent) */
	if (adapter->native->sl_ops->sanitise_afu_regs) {
		rc = adapter->native->sl_ops->sanitise_afu_regs(afu);
		if (rc)
			goto err1;
	}

	/* Reset the slice before reading its descriptor */
	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err1;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err1;

	if (adapter->native->sl_ops->afu_regs_init)
		if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
			goto err1;

	if (adapter->native->sl_ops->register_serr_irq)
		if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
			goto err1;

	if ((rc = cxl_native_register_psl_irq(afu)))
		goto err2;

	/* 0 = configured/idle; -1 means deconfigured (see pci_deconfigure_afu) */
	atomic_set(&afu->configured_state, 0);
	return 0;

err2:
	if (adapter->native->sl_ops->release_serr_irq)
		adapter->native->sl_ops->release_serr_irq(afu);
err1:
	pci_unmap_slice_regs(afu);
	return rc;
}
1343
/*
 * Undo pci_configure_afu(): release interrupts and MMIO mappings.
 *
 * configured_state protocol: 0 = configured/idle, -1 = deconfigured;
 * other users presumably bump it while the AFU is in use — the cmpxchg
 * loop below spins (yielding via schedule()) until it can transition
 * 0 -> -1, i.e. until no one is using the slice.
 */
static void pci_deconfigure_afu(struct cxl_afu *afu)
{
	/*
	 * Wait for concurrent users to drain, then mark the slice
	 * deconfigured so no new user can start.
	 */
	if (atomic_read(&afu->configured_state) != -1) {
		while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
			schedule();
	}
	cxl_native_release_psl_irq(afu);
	if (afu->adapter->native->sl_ops->release_serr_irq)
		afu->adapter->native->sl_ops->release_serr_irq(afu);
	pci_unmap_slice_regs(afu);
}
1359
/*
 * Allocate, configure and register one AFU slice device.
 * On success, adapter->afu[slice] points at the new AFU.
 *
 * Returns 0 on success or a negative errno.
 */
static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	int rc = -ENOMEM;

	afu = cxl_alloc_afu(adapter, slice);
	if (!afu)
		return -ENOMEM;

	afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
	if (!afu->native)
		goto err_free_afu;

	mutex_init(&afu->native->spa_mutex);

	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
	if (rc)
		goto err_free_native;

	rc = pci_configure_afu(afu, adapter, dev);
	if (rc)
		goto err_free_native;

	/* Best-effort: debugfs failure is not fatal */
	cxl_debugfs_afu_add(afu);

	/*
	 * Once registered, the afu must no longer be freed directly (even on
	 * error) — tear it down via device_unregister() instead, hence the
	 * separate err_put1 unwind path below.
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	adapter->afu[afu->slice] = afu;

	/* A failed vPHB registration is logged but does not fail the slice */
	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put1:
	pci_deconfigure_afu(afu);
	cxl_debugfs_afu_remove(afu);
	device_unregister(&afu->dev);
	return rc;

err_free_native:
	kfree(afu->native);
err_free_afu:
	kfree(afu);
	return rc;

}
1416
/*
 * Tear down one AFU slice: vPHB, sysfs, debugfs, contexts, interrupts,
 * MMIO and finally the device itself. Safe to call with a NULL afu
 * (a slice that never initialised).
 */
static void cxl_pci_remove_afu(struct cxl_afu *afu)
{
	pr_devel("%s\n", __func__);

	/* Slice never initialised — nothing to do */
	if (!afu)
		return;

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	/* Unpublish the slice so nobody can look it up any more */
	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);

	pci_deconfigure_afu(afu);
	device_unregister(&afu->dev);
}
1438
/*
 * Trigger a warm (PERST) reset of the whole adapter. Refused when
 * perst_same_image is set, since PERST can reload the card image.
 *
 * Returns 0 on success or a negative errno.
 */
int cxl_pci_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;

	if (adapter->perst_same_image) {
		dev_warn(&dev->dev,
			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
		return -EINVAL;
	}

	dev_info(&dev->dev, "CXL reset\n");

	/*
	 * Flush the adapter data cache before resetting. Done on POWER8 and
	 * on POWER9 other than DD1 — presumably the flush is unsupported on
	 * P9 DD1; confirm against the PSL documentation.
	 */
	if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
		cxl_data_cache_flush(adapter);

	/*
	 * A warm reset asserts/deasserts PERST; whether the card reloads an
	 * image then depends on the image-control state (perst_loads_image).
	 */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	return rc;
}
1469
/*
 * Claim BARs 2 and 0 and ioremap the two private register spaces
 * (p1 and p2). Fully unwound on failure; always reports -ENOMEM
 * regardless of which step failed. Mirrored by cxl_unmap_adapter_regs().
 */
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	if (pci_request_region(dev, 2, "priv 2 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 1 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->native->p1_mmio);
	adapter->native->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}
1498
/*
 * Undo cxl_map_adapter_regs(): unmap whichever register spaces were
 * mapped and release the matching BARs (p1 pairs with region 2, p2 with
 * region 0 — same pairing used when mapping). Idempotent: mappings are
 * NULLed so a second call is a no-op.
 */
static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->native->p1_mmio) {
		iounmap(adapter->native->p1_mmio);
		adapter->native->p1_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
	}
	if (adapter->native->p2_mmio) {
		iounmap(adapter->native->p2_mmio);
		adapter->native->p2_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
	}
}
1512
/*
 * Locate and parse the CXL Vendor Specific Extended Capability (VSEC)
 * and populate the adapter fields derived from it.
 *
 * Returns 0 on success, -ENODEV if no CXL VSEC is present, -EINVAL if
 * the VSEC is too short to be valid.
 */
static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	/* perst_select_user starts out tracking the currently-loaded image */
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* The VSEC encodes offsets and sizes in 64kB units — convert to bytes */
	adapter->native->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;

	/*
	 * Interrupts available to userspace: the total minus one adapter-wide
	 * interrupt minus two per slice — presumably the PSL error irq and
	 * the per-slice PSL/SERR irqs; confirm against irq registration.
	 */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
1570{
1571 int aer;
1572 u32 data;
1573
1574 if (adapter->psl_rev & 0xf000)
1575 return;
1576 if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
1577 return;
1578 pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
1579 if (data & PCI_ERR_UNC_MALF_TLP)
1580 if (data & PCI_ERR_UNC_INTN)
1581 return;
1582 data |= PCI_ERR_UNC_MALF_TLP;
1583 data |= PCI_ERR_UNC_INTN;
1584 pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
1585}
1586
1587static bool cxl_compatible_caia_version(struct cxl *adapter)
1588{
1589 if (cxl_is_power8() && (adapter->caia_major == 1))
1590 return true;
1591
1592 if (cxl_is_power9() && (adapter->caia_major == 2))
1593 return true;
1594
1595 return false;
1596}
1597
/*
 * Sanity-check the values parsed out of the VSEC before trusting them.
 * Returns 0 if the adapter looks usable, a negative errno otherwise.
 */
static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	/* The second port of a two-port card is not driven by this driver */
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	/* MSI-X modes (full/single) are not supported — see the mask above */
	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	/* The card's CAIA version must match this CPU generation */
	if (!cxl_compatible_caia_version(adapter)) {
		dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n",
			 adapter->caia_major);
		return -ENODEV;
	}

	if (!adapter->slices) {
		/* A card with no AFUs is of no use — possibly a bad image */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	/* The problem-state area must fit inside BAR2 */
	if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
			"available in BAR2: 0x%llx > 0x%llx\n",
			 adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
		return -EINVAL;
	}

	return 0;
}
1635
1636ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
1637{
1638 return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
1639}
1640
/*
 * struct device release callback: frees the adapter once its last
 * reference is dropped (installed in cxl_configure_adapter()).
 */
static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	/* Hand the adapter number back to the allocator */
	cxl_remove_adapter_nr(adapter);

	kfree(adapter->native);
	kfree(adapter);
}
1652
1653#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
1654
/*
 * Clear leftover adapter-wide state before using the card.
 * Returns 0 on success or the invalidate_all() error code.
 */
static int sanitise_adapter_regs(struct cxl *adapter)
{
	int rc = 0;

	/* Clear the PSL error IVTE (timebase-error bit) */
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);

	if (adapter->native->sl_ops->invalidate_all) {
		/*
		 * Skipped on POWER9 when PERST reloads the image —
		 * presumably a fresh image has nothing stale to invalidate;
		 * NOTE(review): confirm the rationale.
		 */
		if (cxl_is_power9() && (adapter->perst_loads_image))
			return 0;
		rc = adapter->native->sl_ops->invalidate_all(adapter);
	}

	return rc;
}
1671
1672
1673
1674
/*
 * Bring the adapter up: enable PCI, parse/validate the VSEC, set up
 * BARs and the protocol area, map MMIO, initialise registers, switch
 * the PHB into CAPI mode and register the PSL error interrupt.
 *
 * On failure after the MMIO mapping is established, the mapping is
 * unwound here; the caller is responsible for pci_disable_device()
 * (see cxl_pci_init_adapter()). Returns 0 or a negative errno.
 */
static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	if ((rc = cxl_read_vsec(adapter, dev)))
		return rc;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		return rc;

	cxl_fixup_malformed_tlp(adapter, dev);

	if ((rc = setup_cxl_bars(dev)))
		return rc;

	if ((rc = setup_cxl_protocol_area(dev)))
		return rc;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		return rc;

	if ((rc = sanitise_adapter_regs(adapter)))
		goto err;

	if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
		goto err;

	/* Enable bus mastering (needed at least for CAPP DMA mode) */
	pci_set_master(dev);

	if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
		goto err;

	/*
	 * Explicitly turn snooping back on after the mode switch —
	 * NOTE(review): presumably needed when re-configuring during EEH
	 * recovery; confirm against the OPAL CAPI mode documentation.
	 */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err;

	/* Return value deliberately ignored: init does not depend on
	 * timebase sync succeeding */
	cxl_setup_psl_timebase(adapter, dev);

	if ((rc = cxl_native_register_psl_err_irq(adapter)))
		goto err;

	return 0;

err:
	cxl_unmap_adapter_regs(adapter);
	return rc;

}
1739
/*
 * Undo cxl_configure_adapter(): release the PSL error interrupt, the
 * MMIO mappings/BARs, and disable the PCI device.
 */
static void cxl_deconfigure_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	cxl_native_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);

	pci_disable_device(pdev);
}
1749
/* Stop all PSL trace arrays on a POWER9 PSL (debugfs_stop_trace hook). */
static void cxl_stop_trace_psl9(struct cxl *adapter)
{
	int traceid;
	u64 trace_state, trace_mask;
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	/* Read each trace arm's 2-bit state out of CTCCFG and stop it if needed */
	for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) {
		trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG);
		trace_mask = (0x3ULL << (62 - traceid * 2));
		trace_state = (trace_state & trace_mask) >> (62 - traceid * 2);
		dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n",
			traceid, trace_state);

		/* Only issue the stop MMIO if the trace arm hasn't finished */
		if (trace_state != CXL_PSL9_TRACESTATE_FIN)
			cxl_p1_write(adapter, CXL_PSL9_TRACECFG,
				     0x8400000000000000ULL | traceid);
	}
}
1770
/* Stop PSL tracing on a POWER8 PSL (debugfs_stop_trace hook). */
static void cxl_stop_trace_psl8(struct cxl *adapter)
{
	int slice;

	/* Stop the adapter-wide trace */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);

	/* Stop each slice trace; lock guards against concurrent slice removal */
	spin_lock(&adapter->afu_list_lock);
	for (slice = 0; slice < adapter->slices; slice++) {
		if (adapter->afu[slice])
			cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE,
				      0x8000000000000000LL);
	}
	spin_unlock(&adapter->afu_list_lock);
}
1787
/* Service-layer callbacks for a POWER9 PSL */
static const struct cxl_service_layer_ops psl9_ops = {
	.adapter_regs_init = init_implementation_adapter_regs_psl9,
	.invalidate_all = cxl_invalidate_all_psl9,
	.afu_regs_init = init_implementation_afu_regs_psl9,
	.sanitise_afu_regs = sanitise_afu_regs_psl9,
	.register_serr_irq = cxl_native_register_serr_irq,
	.release_serr_irq = cxl_native_release_serr_irq,
	.handle_interrupt = cxl_irq_psl9,
	.fail_irq = cxl_fail_irq_psl,
	.activate_dedicated_process = cxl_activate_dedicated_process_psl9,
	.attach_afu_directed = cxl_attach_afu_directed_psl9,
	.attach_dedicated_process = cxl_attach_dedicated_process_psl9,
	.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9,
	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
	.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
	.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
	.err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9,
	.debugfs_stop_trace = cxl_stop_trace_psl9,
	.write_timebase_ctrl = write_timebase_ctrl_psl9,
	.timebase_read = timebase_read_psl9,
	.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
	.needs_reset_before_disable = true,
};
1811
/* Service-layer callbacks for a POWER8 PSL */
static const struct cxl_service_layer_ops psl8_ops = {
	.adapter_regs_init = init_implementation_adapter_regs_psl8,
	.invalidate_all = cxl_invalidate_all_psl8,
	.afu_regs_init = init_implementation_afu_regs_psl8,
	.sanitise_afu_regs = sanitise_afu_regs_psl8,
	.register_serr_irq = cxl_native_register_serr_irq,
	.release_serr_irq = cxl_native_release_serr_irq,
	.handle_interrupt = cxl_irq_psl8,
	.fail_irq = cxl_fail_irq_psl,
	.activate_dedicated_process = cxl_activate_dedicated_process_psl8,
	.attach_afu_directed = cxl_attach_afu_directed_psl8,
	.attach_dedicated_process = cxl_attach_dedicated_process_psl8,
	.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
	.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
	.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
	.err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8,
	.debugfs_stop_trace = cxl_stop_trace_psl8,
	.write_timebase_ctrl = write_timebase_ctrl_psl8,
	.timebase_read = timebase_read_psl8,
	.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
	.needs_reset_before_disable = true,
};
1835
/*
 * Service-layer callbacks for an XSL (selected for Mellanox CX4 in
 * set_sl_ops()): no SERR interrupt support and the PHB runs in CAPI
 * DMA mode rather than full CAPI mode.
 */
static const struct cxl_service_layer_ops xsl_ops = {
	.adapter_regs_init = init_implementation_adapter_regs_xsl,
	.invalidate_all = cxl_invalidate_all_psl8,
	.sanitise_afu_regs = sanitise_afu_regs_psl8,
	.handle_interrupt = cxl_irq_psl8,
	.fail_irq = cxl_fail_irq_psl,
	.activate_dedicated_process = cxl_activate_dedicated_process_psl8,
	.attach_afu_directed = cxl_attach_afu_directed_psl8,
	.attach_dedicated_process = cxl_attach_dedicated_process_psl8,
	.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_xsl,
	.write_timebase_ctrl = write_timebase_ctrl_xsl,
	.timebase_read = timebase_read_xsl,
	.capi_mode = OPAL_PHB_CAPI_MODE_DMA,
};
1851
1852static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
1853{
1854 if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
1855
1856 dev_info(&dev->dev, "Device uses an XSL\n");
1857 adapter->native->sl_ops = &xsl_ops;
1858 adapter->min_pe = 1;
1859 } else {
1860 if (cxl_is_power8()) {
1861 dev_info(&dev->dev, "Device uses a PSL8\n");
1862 adapter->native->sl_ops = &psl8_ops;
1863 } else {
1864 dev_info(&dev->dev, "Device uses a PSL9\n");
1865 adapter->native->sl_ops = &psl9_ops;
1866 }
1867 }
1868}
1869
1870
/*
 * Allocate and fully initialise a cxl adapter for @dev, including its
 * debugfs and sysfs presence. Returns the adapter or an ERR_PTR().
 */
static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	int rc;

	adapter = cxl_alloc_adapter();
	if (!adapter)
		return ERR_PTR(-ENOMEM);

	adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
	if (!adapter->native) {
		rc = -ENOMEM;
		goto err_release;
	}

	set_sl_ops(adapter, dev);

	/*
	 * Defaults that must persist across configure/deconfigure cycles
	 * (EEH recovery reconfigures without re-running this function).
	 */
	adapter->perst_loads_image = true;
	adapter->perst_same_image = false;

	rc = cxl_configure_adapter(adapter, dev);
	if (rc) {
		/* cxl_configure_adapter can fail with the device still
		 * enabled — disable it before bailing out */
		pci_disable_device(dev);
		goto err_release;
	}

	/* Best-effort: debugfs failure is not fatal */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * Once registered, the adapter must no longer be freed directly
	 * (even on error) — tear it down via device_unregister() instead,
	 * hence the separate err_put1 unwind path below.
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	/* Adapter is configured: allow contexts to be attached */
	cxl_adapter_context_unlock(adapter);

	return adapter;

err_put1:
	/* Mirrors cxl_pci_remove_adapter(), minus the sysfs removal,
	 * which may not have been added yet on this path */
	cxl_debugfs_adapter_remove(adapter);
	cxl_deconfigure_adapter(adapter);
	device_unregister(&adapter->dev);
	return ERR_PTR(rc);

err_release:
	cxl_release_adapter(&adapter->dev);
	return ERR_PTR(rc);
}
1931
/*
 * Tear the adapter down: sysfs/debugfs, data-cache flush, deconfigure,
 * then drop the device. cxl_release_adapter() frees the struct once the
 * last reference goes away.
 */
static void cxl_pci_remove_adapter(struct cxl *adapter)
{
	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);

	/*
	 * Flush the adapter data cache before removal. Done on POWER8 and
	 * on POWER9 other than DD1 — same condition as cxl_pci_reset().
	 */
	if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
		cxl_data_cache_flush(adapter);

	cxl_deconfigure_adapter(adapter);

	device_unregister(&adapter->dev);
}
1950
1951#define CXL_MAX_PCIEX_PARENT 2
1952
/*
 * Walk up the device-tree parents of @dev counting consecutive "pciex"
 * levels. Returns non-zero (true) if the device sits behind more than
 * CXL_MAX_PCIEX_PARENT PCIe levels (i.e. behind a switch), 0 otherwise,
 * or -ENODEV if the device has no OF node.
 */
int cxl_slot_is_switched(struct pci_dev *dev)
{
	struct device_node *np;
	int depth = 0;
	const __be32 *prop;

	if (!(np = pci_device_to_OF_node(dev))) {
		pr_err("cxl: np = NULL\n");
		return -ENODEV;
	}
	/* Extra ref so the of_get_next_parent() loop below can consume one */
	of_node_get(np);
	while (np) {
		/* of_get_next_parent() drops the ref on the old node */
		np = of_get_next_parent(np);
		prop = of_get_property(np, "device_type", NULL);
		if (!prop || strcmp((char *)prop, "pciex"))
			break;
		depth++;
	}
	of_node_put(np);
	return (depth > CXL_MAX_PCIEX_PARENT);
}
1974
1975bool cxl_slot_is_supported(struct pci_dev *dev, int flags)
1976{
1977 if (!cpu_has_feature(CPU_FTR_HVMODE))
1978 return false;
1979
1980 if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) {
1981
1982
1983
1984
1985
1986
1987 return false;
1988 }
1989
1990 if (cxl_slot_is_switched(dev))
1991 return false;
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007 return true;
2008}
2009EXPORT_SYMBOL_GPL(cxl_slot_is_supported);
2010
2011
/*
 * PCI probe: initialise the adapter, then each AFU slice. A slice that
 * fails to initialise is skipped (adapter->afu[slice] stays NULL) rather
 * than failing the whole probe. Returns 0 or a negative errno.
 */
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	/* Devices hanging off our own vPHB are not cxl adapters */
	if (cxl_pci_is_vphb_device(dev)) {
		dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
		return -ENODEV;
	}

	if (cxl_slot_is_switched(dev)) {
		dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
		return -ENODEV;
	}

	/* On POWER9 the driver only works with the radix MMU */
	if (cxl_is_power9() && !radix_enabled()) {
		dev_info(&dev->dev, "Only Radix mode supported\n");
		return -ENODEV;
	}

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	adapter = cxl_pci_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		/* Per-slice failures are logged and skipped, not fatal */
		if ((rc = pci_init_afu(adapter, slice, dev))) {
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
			continue;
		}

		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
		if (rc)
			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
	}

	/* Let the PHB know about the first AFU if we sit on a CAPI PHB */
	if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1)
		pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]);

	return 0;
}
2058
2059static void cxl_remove(struct pci_dev *dev)
2060{
2061 struct cxl *adapter = pci_get_drvdata(dev);
2062 struct cxl_afu *afu;
2063 int i;
2064
2065
2066
2067
2068
2069 for (i = 0; i < adapter->slices; i++) {
2070 afu = adapter->afu[i];
2071 cxl_pci_remove_afu(afu);
2072 }
2073 cxl_pci_remove_adapter(adapter);
2074}
2075
/*
 * Propagate an EEH error state to every driver bound on the AFU's vPHB
 * and aggregate their answers: DISCONNECT dominates everything,
 * NONE overrides the default NEED_RESET.
 */
static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
						pci_channel_state_t state)
{
	struct pci_dev *afu_dev;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;

	/* No vPHB registered for this AFU — nothing to notify */
	if (afu->phb == NULL)
		return result;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		afu_dev->error_state = state;

		if (afu_dev->driver->err_handler)
			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
										  state);
		/* Aggregate: DISCONNECT beats all; NONE beats NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}
	return result;
}
2107
2108static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
2109 pci_channel_state_t state)
2110{
2111 struct cxl *adapter = pci_get_drvdata(pdev);
2112 struct cxl_afu *afu;
2113 pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
2114 int i;
2115
2116
2117
2118
2119
2120 schedule();
2121
2122
2123 if (state == pci_channel_io_perm_failure) {
2124 for (i = 0; i < adapter->slices; i++) {
2125 afu = adapter->afu[i];
2126
2127
2128
2129
2130 cxl_vphb_error_detected(afu, state);
2131 }
2132 return PCI_ERS_RESULT_DISCONNECT;
2133 }
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157 if (adapter->perst_loads_image && !adapter->perst_same_image) {
2158
2159 dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
2160 return PCI_ERS_RESULT_NONE;
2161 }
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213 for (i = 0; i < adapter->slices; i++) {
2214 afu = adapter->afu[i];
2215
2216 afu_result = cxl_vphb_error_detected(afu, state);
2217
2218 cxl_context_detach_all(afu);
2219 cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
2220 pci_deconfigure_afu(afu);
2221
2222
2223 if (afu_result == PCI_ERS_RESULT_DISCONNECT)
2224 result = PCI_ERS_RESULT_DISCONNECT;
2225 else if ((afu_result == PCI_ERS_RESULT_NONE) &&
2226 (result == PCI_ERS_RESULT_NEED_RESET))
2227 result = PCI_ERS_RESULT_NONE;
2228 }
2229
2230
2231 if (cxl_adapter_context_lock(adapter) != 0)
2232 dev_warn(&adapter->dev,
2233 "Couldn't take context lock with %d active-contexts\n",
2234 atomic_read(&adapter->contexts_num));
2235
2236 cxl_deconfigure_adapter(adapter);
2237
2238 return result;
2239}
2240
2241static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
2242{
2243 struct cxl *adapter = pci_get_drvdata(pdev);
2244 struct cxl_afu *afu;
2245 struct cxl_context *ctx;
2246 struct pci_dev *afu_dev;
2247 pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
2248 pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
2249 int i;
2250
2251 if (cxl_configure_adapter(adapter, pdev))
2252 goto err;
2253
2254
2255
2256
2257
2258
2259 cxl_adapter_context_unlock(adapter);
2260
2261 for (i = 0; i < adapter->slices; i++) {
2262 afu = adapter->afu[i];
2263
2264 if (pci_configure_afu(afu, adapter, pdev))
2265 goto err;
2266
2267 if (cxl_afu_select_best_mode(afu))
2268 goto err;
2269
2270 if (afu->phb == NULL)
2271 continue;
2272
2273 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2274
2275
2276
2277 ctx = cxl_get_context(afu_dev);
2278
2279 if (ctx && cxl_release_context(ctx))
2280 goto err;
2281
2282 ctx = cxl_dev_context_init(afu_dev);
2283 if (IS_ERR(ctx))
2284 goto err;
2285
2286 afu_dev->dev.archdata.cxl_ctx = ctx;
2287
2288 if (cxl_ops->afu_check_and_enable(afu))
2289 goto err;
2290
2291 afu_dev->error_state = pci_channel_io_normal;
2292
2293
2294
2295
2296
2297
2298
2299 if (!afu_dev->driver)
2300 continue;
2301
2302 if (afu_dev->driver->err_handler &&
2303 afu_dev->driver->err_handler->slot_reset)
2304 afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);
2305
2306 if (afu_result == PCI_ERS_RESULT_DISCONNECT)
2307 result = PCI_ERS_RESULT_DISCONNECT;
2308 }
2309 }
2310 return result;
2311
2312err:
2313
2314
2315
2316
2317 dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
2318 return PCI_ERS_RESULT_DISCONNECT;
2319}
2320
2321static void cxl_pci_resume(struct pci_dev *pdev)
2322{
2323 struct cxl *adapter = pci_get_drvdata(pdev);
2324 struct cxl_afu *afu;
2325 struct pci_dev *afu_dev;
2326 int i;
2327
2328
2329
2330
2331
2332 for (i = 0; i < adapter->slices; i++) {
2333 afu = adapter->afu[i];
2334
2335 if (afu->phb == NULL)
2336 continue;
2337
2338 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2339 if (afu_dev->driver && afu_dev->driver->err_handler &&
2340 afu_dev->driver->err_handler->resume)
2341 afu_dev->driver->err_handler->resume(afu_dev);
2342 }
2343 }
2344}
2345
/* EEH (PCI error recovery) callbacks wired into the PCI core */
static const struct pci_error_handlers cxl_err_handler = {
	.error_detected = cxl_pci_error_detected,
	.slot_reset = cxl_pci_slot_reset,
	.resume = cxl_pci_resume,
};
2351
/* PCI driver registration; shutdown shares the remove path */
struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
	.err_handler = &cxl_err_handler,
};
2360