// SPDX-License-Identifier: GPL-2.0-only
/**
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME "pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED -1
#define IRQ_TYPE_LEGACY 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2

#define PCI_ENDPOINT_TEST_MAGIC 0x0

#define PCI_ENDPOINT_TEST_COMMAND 0x4
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)

#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18

#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28

#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)

#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_LS1088A 0x80c0

#define is_am654_pci_dev(pdev) \
	((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev *pdev;
	void __iomem *base;
	void __iomem *bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int last_irq;
	int num_irqs;
	int irq_type;

	struct mutex mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

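/*
 * Shared handler for every test interrupt: record which vector fired,
 * complete irq_raised and acknowledge STATUS_IRQ_RAISED in the status
 * register.
 */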
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, reg);

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

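/*
 * Allocate interrupt vectors of the requested type (legacy INTx, MSI or
 * MSI-X) and record the resulting type and vector count in the test context.
 */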
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

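/*
 * Install the shared test IRQ handler on every allocated vector; on failure
 * report which vector could not be requested for the current IRQ type.
 */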
static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

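/*
 * Exercise a BAR by writing a fixed pattern to every dword and reading it
 * back; the register BAR is limited to its first dword so the test registers
 * are not overwritten.
 */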
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

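/*
 * Request a legacy (INTx) interrupt from the endpoint and wait up to one
 * second for the handler to signal completion.
 */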
static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

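/*
 * Request a specific MSI or MSI-X vector from the endpoint and verify that
 * the interrupt arrived on the matching host vector.
 */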
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix == false ? IRQ_TYPE_MSI :
				 IRQ_TYPE_MSIX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix == false ? COMMAND_RAISE_MSI_IRQ :
				 COMMAND_RAISE_MSIX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
		return true;

	return false;
}

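/*
 * COPY test: fill a source buffer with random data, program the endpoint
 * with the source and destination bus addresses, issue COMMAND_COPY and
 * compare the CRC32 of both buffers once the completion interrupt arrives.
 */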
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

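/*
 * WRITE test: fill a host buffer with random data, publish its CRC32 in the
 * checksum register and issue COMMAND_READ so the endpoint reads the buffer
 * and reports the result via STATUS_READ_SUCCESS.
 */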
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

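/*
 * READ test: issue COMMAND_WRITE so the endpoint writes random data into a
 * host buffer, then compare the buffer's CRC32 with the checksum reported by
 * the endpoint.
 */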
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

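/*
 * Switch to the requested IRQ type: release the current vectors and
 * handlers, then reallocate and re-request them for the new type.
 */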
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

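/*
 * Dispatch the PCITEST_* ioctls from the misc device to the individual test
 * routines, serialized by the per-device mutex.
 */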
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);
	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar < 0 || bar > 5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

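/*
 * Probe: enable the device, map its BARs, set up the test interrupts and
 * expose the device to user space as a misc character device named
 * "pci-endpoint-test.<id>".
 */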
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
		goto err_disable_irq;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test))
		goto err_kfree_test_name;

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

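/*
 * Remove: tear down everything set up in probe, i.e. the misc device, IRQs,
 * BAR mappings, IDA id and PCI resources.
 */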
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = pci_endpoint_test_tbl,
	.probe = pci_endpoint_test_probe,
	.remove = pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");