1
2
3
4
5
6
7
8#include <linux/anon_inodes.h>
9#include <linux/cdev.h>
10#include <linux/delay.h>
11#include <linux/device.h>
12#include <linux/dma-mapping.h>
13#include <linux/file.h>
14#include <linux/firmware/xlnx-zynqmp.h>
15#include <linux/fs.h>
16#include <linux/idr.h>
17#include <linux/interrupt.h>
18#include <linux/list.h>
19#include <linux/module.h>
20#include <linux/mutex.h>
21#include <linux/of.h>
22#include <linux/of_device.h>
23#include <linux/platform_device.h>
24#include <linux/slab.h>
25#include <linux/uaccess.h>
26#include <linux/xlnx-ai-engine.h>
27#include <uapi/linux/xlnx-ai-engine.h>
28
29#include "ai-engine-internal.h"
30
31#define AIE_DEV_MAX (MINORMASK + 1)
32#define VERSAL_SILICON_REV_MASK GENMASK(31, 28)
33
34static dev_t aie_major;
35struct class *aie_class;
36
37static DEFINE_IDA(aie_device_ida);
38static DEFINE_IDA(aie_minor_ida);
39
40
41
42
43
44
45
46
47
48
49
/**
 * aie_partition_fd() - allocate a file descriptor for an AI engine partition
 * @apart: AI engine partition
 *
 * Installs @apart->filep into the calling process's fd table. fd_install()
 * consumes the caller's reference on the file, so after success the new fd
 * owns that reference; the caller must not fput() it.
 *
 * Return: file descriptor number on success, negative error code from
 *	   get_unused_fd_flags() on failure.
 */
static int aie_partition_fd(struct aie_partition *apart)
{
	int ret;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		dev_err(&apart->dev,
			"Failed to get fd for partition %u.\n",
			apart->partition_id);
		return ret;
	}
	fd_install(ret, apart->filep);

	return ret;
}
65
66
67
68
69
70
71
72static int aie_enquire_partitions(struct aie_device *adev,
73 struct aie_partition_query *query)
74{
75 struct aie_partition *apart;
76 u32 partition_cnt, i = 0;
77 int ret;
78
79 if (!query->partitions) {
80
81
82
83
84 query->partition_cnt = 0;
85 list_for_each_entry(apart, &adev->partitions, node)
86 query->partition_cnt++;
87 return 0;
88 }
89
90 partition_cnt = query->partition_cnt;
91 if (!partition_cnt)
92 return 0;
93
94 ret = mutex_lock_interruptible(&adev->mlock);
95 if (ret)
96 return ret;
97
98 list_for_each_entry(apart, &adev->partitions, node) {
99 struct aie_range_args part;
100
101 if (i >= partition_cnt)
102 break;
103 part.partition_id = apart->partition_id;
104
105
106
107
108
109 part.uid = 0;
110 part.range.start.col = apart->range.start.col;
111 part.range.start.row = apart->range.start.row;
112 part.range.size.col = apart->range.size.col;
113 part.range.size.row = apart->range.size.row;
114
115 part.status = apart->status;
116 if (copy_to_user((void __user *)&query->partitions[i], &part,
117 sizeof(part))) {
118 mutex_unlock(&adev->mlock);
119 return -EFAULT;
120 }
121 i++;
122 }
123 mutex_unlock(&adev->mlock);
124 query->partition_cnt = i;
125
126 return 0;
127}
128
129
130
131
132
133
134
135
136
137
/**
 * aie_get_partition_from_id() - look up a partition by partition ID
 * @adev: AI engine device
 * @partition_id: partition ID to find
 *
 * Walks @adev->partitions looking for a matching partition ID.
 *
 * NOTE(review): the list is walked without locking here; the caller in
 * this file (aie_partition_request_from_adev()) holds adev->mlock around
 * the call — presumably that is the required locking contract; confirm
 * for any new callers.
 *
 * Return: pointer to the matching partition, or NULL if none exists.
 */
struct aie_partition *aie_get_partition_from_id(struct aie_device *adev,
						u32 partition_id)
{
	struct aie_partition *apart;

	list_for_each_entry(apart, &adev->partitions, node) {
		if (apart->partition_id == partition_id)
			return apart;
	}

	return NULL;
}
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167static int aie_partition_get(struct aie_partition *apart,
168 struct aie_partition_req *req)
169{
170 struct file *filep;
171 int ret;
172
173 (void)req;
174
175 if (apart->status & XAIE_PART_STATUS_INUSE) {
176 dev_err(&apart->dev,
177 "request partition %u failed, partition in use.\n",
178 apart->partition_id);
179 return -EBUSY;
180 }
181
182
183
184
185
186
187
188
189 filep = anon_inode_getfile(dev_name(&apart->dev), &aie_part_fops,
190 apart, O_RDWR);
191 if (IS_ERR(filep)) {
192 dev_err(&apart->dev,
193 "Failed to request partition %u, failed to get file.\n",
194 apart->partition_id);
195 return PTR_ERR(filep);
196 }
197
198 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
199 apart->filep = filep;
200
201 apart->status = XAIE_PART_STATUS_INUSE;
202 apart->cntrflag = req->flag;
203
204
205 ret = aie_part_open(apart, (void *)req->meta_data);
206 if (ret) {
207 dev_err(&apart->dev, "Failed to open partition %u instance.\n",
208 apart->partition_id);
209 fput(filep);
210 return ret;
211 }
212
213 return 0;
214}
215
216
217
218
219
220
221
222
223
224
225
226
227
228
/**
 * aie_partition_request_from_adev() - request a partition from an AI
 *				       engine device
 * @adev: AI engine device
 * @req: partition request containing the partition ID and flags
 *
 * Looks up the partition by @req->partition_id under @adev->mlock, then
 * acquires it via aie_partition_get() under @apart->mlock.
 *
 * NOTE(review): adev->mlock is dropped before apart->mlock is taken, so a
 * partition removed between the two locks would leave @apart dangling —
 * verify that partitions cannot be removed while a device ioctl is in
 * flight.
 *
 * Return: the partition on success, ERR_PTR on failure.
 */
static struct aie_partition *
aie_partition_request_from_adev(struct aie_device *adev,
				struct aie_partition_req *req)
{
	struct aie_partition *apart;
	int ret;

	ret = mutex_lock_interruptible(&adev->mlock);
	if (ret)
		return ERR_PTR(ret);

	apart = aie_get_partition_from_id(adev, req->partition_id);
	if (!apart) {
		dev_err(&adev->dev,
			"request partition %u failed, not exist.\n",
			req->partition_id);
		mutex_unlock(&adev->mlock);
		return ERR_PTR(-EINVAL);
	}
	mutex_unlock(&adev->mlock);

	ret = mutex_lock_interruptible(&apart->mlock);
	if (ret)
		return ERR_PTR(ret);

	ret = aie_partition_get(apart, req);

	mutex_unlock(&apart->mlock);

	if (ret)
		apart = ERR_PTR(ret);
	return apart;
}
262
263static long xilinx_ai_engine_ioctl(struct file *filp, unsigned int cmd,
264 unsigned long arg)
265{
266 struct inode *inode = file_inode(filp);
267 struct aie_device *adev = cdev_to_aiedev(inode->i_cdev);
268 void __user *argp = (void __user *)arg;
269 int ret;
270
271 switch (cmd) {
272 case AIE_ENQUIRE_PART_IOCTL:
273 {
274 struct aie_partition_query query;
275 struct aie_partition_query __user *uquery_ptr = argp;
276
277 if (copy_from_user(&query, uquery_ptr, sizeof(query)))
278 return -EFAULT;
279 ret = aie_enquire_partitions(adev, &query);
280 if (ret < 0)
281 return ret;
282 if (copy_to_user((void __user *)&uquery_ptr->partition_cnt,
283 &query.partition_cnt,
284 sizeof(query.partition_cnt)))
285 return -EFAULT;
286 break;
287 }
288 case AIE_REQUEST_PART_IOCTL:
289 {
290 struct aie_partition_req req;
291 struct aie_partition *apart;
292
293 if (copy_from_user(&req, argp, sizeof(req)))
294 return -EFAULT;
295
296 apart = aie_partition_request_from_adev(adev, &req);
297 if (IS_ERR(apart))
298 return PTR_ERR(apart);
299
300
301 ret = aie_partition_fd(apart);
302 if (ret < 0) {
303 fput(apart->filep);
304 break;
305 }
306 break;
307 }
308 default:
309 dev_err(&adev->dev, "Invalid ioctl command %u.\n", cmd);
310 ret = -EINVAL;
311 break;
312 }
313
314 return ret;
315}
316
/* File operations of the AI engine device node (ioctl entry point only). */
static const struct file_operations aie_device_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = xilinx_ai_engine_ioctl,
};
321
/*
 * Device release callback: undoes the IDA allocations and cdev
 * registration done in probe and releases the column resource bitmap.
 * Runs from put_device() when the last reference on the device drops.
 */
static void xilinx_ai_engine_release_device(struct device *dev)
{
	struct aie_device *adev = dev_to_aiedev(dev);

	ida_simple_remove(&aie_device_ida, dev->id);
	ida_simple_remove(&aie_minor_ida, MINOR(dev->devt));
	cdev_del(&adev->cdev);
	aie_resource_uninitialize(&adev->cols_res);
}
331
332
333
334
335
336
337
338
/**
 * of_xilinx_ai_engine_part_probe() - probe AI engine partition DT nodes
 * @adev: AI engine device
 *
 * Probes each available child device-tree node that has not been
 * populated yet as an AI engine partition. On probe failure the
 * OF_POPULATED flag is cleared again so the node can be retried later;
 * failures are logged but do not abort the remaining children.
 */
static void of_xilinx_ai_engine_part_probe(struct aie_device *adev)
{
	struct device_node *nc;

	for_each_available_child_of_node(adev->dev.of_node, nc) {
		struct aie_partition *apart;

		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		apart = of_aie_part_probe(adev, nc);
		if (IS_ERR(apart)) {
			dev_err(&adev->dev,
				"Failed to probe AI engine part for %pOF\n",
				nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
357
358static int xilinx_ai_engine_probe(struct platform_device *pdev)
359{
360 struct aie_device *adev;
361 struct device *dev;
362 u32 idcode, version, pm_reg[2];
363 int ret;
364
365 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
366 if (!adev)
367 return -ENOMEM;
368 platform_set_drvdata(pdev, adev);
369 INIT_LIST_HEAD(&adev->partitions);
370 mutex_init(&adev->mlock);
371
372 adev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
373 if (!adev->res) {
374 dev_err(&pdev->dev, "No memory resource.\n");
375 return -EINVAL;
376 }
377 adev->base = devm_ioremap_resource(&pdev->dev, adev->res);
378 if (IS_ERR(adev->base)) {
379 dev_err(&pdev->dev, "no io memory resource.\n");
380 return PTR_ERR(adev->base);
381 }
382
383
384 ret = aie_device_init(adev);
385 if (ret < 0) {
386 dev_err(&pdev->dev, "failed to initialize device instance.\n");
387 return ret;
388 }
389
390
391
392
393
394 ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
395 pm_reg, ARRAY_SIZE(pm_reg));
396 if (ret < 0) {
397 dev_err(&pdev->dev,
398 "Failed to read power manangement information\n");
399 return ret;
400 }
401 adev->pm_node_id = pm_reg[1];
402
403 ret = zynqmp_pm_get_chipid(&idcode, &version);
404 if (ret < 0) {
405 dev_err(&pdev->dev, "Failed to get chip ID\n");
406 return ret;
407 }
408 adev->version = FIELD_GET(VERSAL_SILICON_REV_MASK, idcode);
409
410 dev = &adev->dev;
411 device_initialize(dev);
412 dev->class = aie_class;
413 dev->parent = &pdev->dev;
414 dev->of_node = pdev->dev.of_node;
415
416 ret = ida_simple_get(&aie_minor_ida, 0, AIE_DEV_MAX, GFP_KERNEL);
417 if (ret < 0)
418 goto free_dev;
419 dev->devt = MKDEV(MAJOR(aie_major), ret);
420 ret = ida_simple_get(&aie_device_ida, 0, 0, GFP_KERNEL);
421 if (ret < 0)
422 goto free_minor_ida;
423 dev->id = ret;
424 dev_set_name(&adev->dev, "aie%d", dev->id);
425
426 cdev_init(&adev->cdev, &aie_device_fops);
427 adev->cdev.owner = THIS_MODULE;
428 ret = cdev_add(&adev->cdev, dev->devt, 1);
429 if (ret)
430 goto free_ida;
431
432 dev->release = xilinx_ai_engine_release_device;
433
434 ret = device_add(dev);
435 if (ret) {
436 dev_err(&pdev->dev, "device_add failed: %d\n", ret);
437 put_device(dev);
438 return ret;
439 }
440
441 of_xilinx_ai_engine_part_probe(adev);
442 dev_info(&pdev->dev, "Xilinx AI Engine device(cols=%u) probed\n",
443 adev->cols_res.total);
444
445 INIT_WORK(&adev->backtrack, aie_array_backtrack);
446
447 adev->irq = platform_get_irq_byname(pdev, "interrupt1");
448 if (adev->irq < 0)
449 goto free_ida;
450
451 ret = devm_request_threaded_irq(dev, adev->irq, NULL, aie_interrupt,
452 IRQF_ONESHOT, dev_name(dev), adev);
453 if (ret) {
454 dev_err(&pdev->dev, "Failed to request AIE IRQ.\n");
455 goto free_ida;
456 }
457
458 adev->clk = devm_clk_get(&pdev->dev, NULL);
459 if (!adev->clk) {
460 dev_err(&pdev->dev, "Failed to get device clock.\n");
461 goto free_ida;
462 }
463
464 return 0;
465
466free_ida:
467 ida_simple_remove(&aie_device_ida, dev->id);
468free_minor_ida:
469 ida_simple_remove(&aie_minor_ida, MINOR(dev->devt));
470free_dev:
471 put_device(dev);
472
473 return ret;
474}
475
/**
 * xilinx_ai_engine_remove() - remove the AI engine platform device
 * @pdev: platform device
 *
 * Tears down every partition, then unregisters and releases the device.
 *
 * NOTE(review): the partition list is walked without adev->mlock here;
 * presumably no ioctl can race with driver removal at this point —
 * confirm.
 *
 * Return: always 0.
 */
static int xilinx_ai_engine_remove(struct platform_device *pdev)
{
	struct aie_device *adev = platform_get_drvdata(pdev);
	struct list_head *node, *pos;

	list_for_each_safe(pos, node, &adev->partitions) {
		struct aie_partition *apart;

		apart = list_entry(pos, struct aie_partition, node);
		aie_part_remove(apart);
	}

	device_del(&adev->dev);
	put_device(&adev->dev);

	return 0;
}
493
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id xilinx_ai_engine_of_match[] = {
	{ .compatible = "xlnx,ai-engine-v1.0", },
	{ },
};
MODULE_DEVICE_TABLE(of, xilinx_ai_engine_of_match);
499
/* Platform driver glue; registered from xilinx_ai_engine_init(). */
static struct platform_driver xilinx_ai_engine_driver = {
	.probe = xilinx_ai_engine_probe,
	.remove = xilinx_ai_engine_remove,
	.driver = {
		.name = "xilinx-ai-engine",
		.of_match_table = xilinx_ai_engine_of_match,
	},
};
508
509
510
511
512
513
514
515
516static int aie_partition_dev_match(struct device *dev, const void *data)
517{
518 struct aie_partition *apart;
519 u32 partition_id = (u32)(uintptr_t)data;
520
521 if (strncmp(dev_name(dev), "aiepart", strlen("aiepart")))
522 return 0;
523
524 apart = dev_to_aiepart(dev);
525 if (apart->partition_id == partition_id)
526 return 1;
527 return 0;
528}
529
530
531
532
533
534
535
536
537
538
539
/**
 * aie_class_find_partition_from_id() - find a partition device by ID
 * @partition_id: partition ID to look for
 *
 * Searches every device of the AIE class for a partition device with the
 * given partition ID. class_find_device() takes a reference on the
 * returned device; the caller is responsible for dropping it with
 * put_device() when done with the partition.
 *
 * Return: the partition, or NULL if no match was found.
 */
static struct aie_partition *aie_class_find_partition_from_id(u32 partition_id)
{
	struct device *dev;

	dev = class_find_device(aie_class, NULL,
				(void *)(uintptr_t)partition_id,
				aie_partition_dev_match);
	if (!dev)
		return NULL;
	return dev_to_aiepart(dev);
}
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572bool aie_partition_is_available(struct aie_partition_req *req)
573{
574 struct aie_partition *apart;
575 int ret;
576
577 if (!req)
578 return false;
579
580 apart = aie_class_find_partition_from_id(req->partition_id);
581 if (!apart)
582 return false;
583
584 ret = mutex_lock_interruptible(&apart->mlock);
585 if (ret)
586 return false;
587
588 if (apart->status & XAIE_PART_STATUS_INUSE) {
589 mutex_unlock(&apart->mlock);
590 return false;
591 }
592
593 mutex_unlock(&apart->mlock);
594 return true;
595}
596EXPORT_SYMBOL_GPL(aie_partition_is_available);
597
598
599
600
601
602
603
604
605
606
607
608
609struct device *aie_partition_request(struct aie_partition_req *req)
610{
611 struct aie_partition *apart;
612 int ret;
613
614 if (!req)
615 return ERR_PTR(-EINVAL);
616
617 apart = aie_class_find_partition_from_id(req->partition_id);
618 if (!apart)
619 return ERR_PTR(-ENODEV);
620
621 ret = mutex_lock_interruptible(&apart->mlock);
622 if (ret)
623 return ERR_PTR(ret);
624
625 ret = aie_partition_get(apart, req);
626
627 mutex_unlock(&apart->mlock);
628 if (ret)
629 return ERR_PTR(ret);
630
631 if (apart->error_to_report)
632 schedule_work(&apart->adev->backtrack);
633
634 return &apart->dev;
635}
636EXPORT_SYMBOL_GPL(aie_partition_request);
637
638
639
640
641
642
643
644
645
646
647int aie_partition_get_fd(struct device *dev)
648{
649 struct aie_partition *apart;
650 int ret;
651
652 if (!dev)
653 return -EINVAL;
654
655 apart = dev_to_aiepart(dev);
656
657 ret = aie_partition_fd(apart);
658 if (ret < 0)
659 return ret;
660
661 get_file(apart->filep);
662
663 return ret;
664}
665EXPORT_SYMBOL_GPL(aie_partition_get_fd);
666
667
668
669
670
/**
 * aie_partition_release() - release a partition taken with
 *			     aie_partition_request()
 * @dev: AI engine partition device
 *
 * Drops the file reference created when the partition was requested; the
 * file's release hook performs the actual partition teardown when the
 * last reference goes away.
 */
void aie_partition_release(struct device *dev)
{
	struct aie_partition *apart;

	if (WARN_ON(!dev))
		return;

	apart = dev_to_aiepart(dev);
	fput(apart->filep);
}
681EXPORT_SYMBOL_GPL(aie_partition_release);
682
683
684
685
686
687
688int aie_partition_reset(struct device *dev)
689{
690 struct aie_partition *apart;
691
692 if (WARN_ON(!dev))
693 return -EINVAL;
694
695 apart = dev_to_aiepart(dev);
696 return aie_part_reset(apart);
697}
698EXPORT_SYMBOL_GPL(aie_partition_reset);
699
700
701
702
703
704
705
706
707
708
709int aie_partition_post_reinit(struct device *dev)
710{
711 struct aie_partition *apart;
712
713 if (WARN_ON(!dev))
714 return -EINVAL;
715
716 apart = dev_to_aiepart(dev);
717 return aie_part_post_reinit(apart);
718}
719EXPORT_SYMBOL_GPL(aie_partition_post_reinit);
720
721static int __init xilinx_ai_engine_init(void)
722{
723 int ret;
724
725 ret = alloc_chrdev_region(&aie_major, 0, AIE_DEV_MAX, "aie");
726 if (ret < 0) {
727 pr_err("aie: failed to allocate aie region\n");
728 return ret;
729 }
730
731 aie_class = class_create(THIS_MODULE, "aie");
732 if (IS_ERR(aie_class)) {
733 pr_err("failed to create aie class\n");
734 unregister_chrdev_region(aie_major, AIE_DEV_MAX);
735 return PTR_ERR(aie_class);
736 }
737
738 platform_driver_register(&xilinx_ai_engine_driver);
739
740 return 0;
741}
742postcore_initcall(xilinx_ai_engine_init);
743
/* Module exit: undo xilinx_ai_engine_init() in reverse order. */
static void __exit xilinx_ai_engine_exit(void)
{
	platform_driver_unregister(&xilinx_ai_engine_driver);
	class_destroy(aie_class);
	unregister_chrdev_region(aie_major, AIE_DEV_MAX);
}
750module_exit(xilinx_ai_engine_exit);
751
752MODULE_AUTHOR("Xilinx, Inc.");
753MODULE_LICENSE("GPL v2");
754