1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/module.h>
14
15#include "dfl.h"
16
/* serializes allocation/free of FME/PORT device ids in dfl_devs[].id */
static DEFINE_MUTEX(dfl_id_mutex);
18
19
20
21
22
23
24
25
26
27
28
29
30
/*
 * enum dfl_id_type - type of the feature devices whose ids are managed
 * by this framework (one id allocator per type, see dfl_devs[]).
 */
enum dfl_id_type {
	FME_ID,		/* FPGA Management Engine feature devices */
	PORT_ID,	/* Accelerator Port feature devices */
	DFL_ID_MAX,	/* number of valid id types */
};
36
/* index of the char device region used by a feature device type */
enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,	/* char dev region for FME devices */
	DFL_FPGA_DEVT_PORT,	/* char dev region for PORT devices */
	DFL_FPGA_DEVT_MAX,
};
42
43
44
45
46
47
48
49
/**
 * struct dfl_dev_info - dfl feature device information
 * @name: name string of the feature platform device.
 * @dfh_id: id value in Device Feature Header (DFH) register.
 * @id: idr used to allocate instance ids for this device type.
 * @devt_type: index into dfl_chrdevs[] for this type's char dev region.
 */
struct dfl_dev_info {
	const char *name;
	u32 dfh_id;
	struct idr id;
	enum dfl_fpga_devt_type devt_type;
};
56
57
/* feature device table - indexed by enum dfl_id_type */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};
64
65
66
67
68
69
/**
 * struct dfl_chardev_info - chardev information of dfl framework
 * @name: name string of the char device region.
 * @devt: devt (major, 0) of the allocated region; 0 when unallocated.
 */
struct dfl_chardev_info {
	const char *name;
	dev_t devt;
};
74
75
/* indexed by enum dfl_fpga_devt_type; .devt is filled in dfl_chardev_init() */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};
80
81static void dfl_ids_init(void)
82{
83 int i;
84
85 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
86 idr_init(&dfl_devs[i].id);
87}
88
89static void dfl_ids_destroy(void)
90{
91 int i;
92
93 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
94 idr_destroy(&dfl_devs[i].id);
95}
96
97static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
98{
99 int id;
100
101 WARN_ON(type >= DFL_ID_MAX);
102 mutex_lock(&dfl_id_mutex);
103 id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
104 mutex_unlock(&dfl_id_mutex);
105
106 return id;
107}
108
109static void dfl_id_free(enum dfl_id_type type, int id)
110{
111 WARN_ON(type >= DFL_ID_MAX);
112 mutex_lock(&dfl_id_mutex);
113 idr_remove(&dfl_devs[type].id, id);
114 mutex_unlock(&dfl_id_mutex);
115}
116
117static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
118{
119 int i;
120
121 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
122 if (!strcmp(dfl_devs[i].name, pdev->name))
123 return i;
124
125 return DFL_ID_MAX;
126}
127
128static enum dfl_id_type dfh_id_to_type(u32 id)
129{
130 int i;
131
132 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
133 if (dfl_devs[i].dfh_id == id)
134 return i;
135
136 return DFL_ID_MAX;
137}
138
139
140
141
142
143
144
145
146
147
148
/* protects dfl_port_ops_list below */
static DEFINE_MUTEX(dfl_port_ops_mutex);
/* registered port operations, matched against feature dev names */
static LIST_HEAD(dfl_port_ops_list);
151
152
153
154
155
156
157
158
159struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
160{
161 struct dfl_fpga_port_ops *ops = NULL;
162
163 mutex_lock(&dfl_port_ops_mutex);
164 if (list_empty(&dfl_port_ops_list))
165 goto done;
166
167 list_for_each_entry(ops, &dfl_port_ops_list, node) {
168
169 if (!strcmp(pdev->name, ops->name)) {
170 if (!try_module_get(ops->owner))
171 ops = NULL;
172 goto done;
173 }
174 }
175
176 ops = NULL;
177done:
178 mutex_unlock(&dfl_port_ops_mutex);
179 return ops;
180}
181EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
182
183
184
185
186
187void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
188{
189 if (ops && ops->owner)
190 module_put(ops->owner);
191}
192EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
193
194
195
196
197
/**
 * dfl_fpga_port_ops_add - add port ops to the global list
 * @ops: port ops to add; its @node is linked into dfl_port_ops_list.
 */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
204EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
205
206
207
208
209
/**
 * dfl_fpga_port_ops_del - remove port ops from the global list
 * @ops: port ops previously added with dfl_fpga_port_ops_add().
 */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
217
218
219
220
221
222
223
224
225int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
226{
227 struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev);
228 int port_id;
229
230 if (!port_ops || !port_ops->get_id)
231 return 0;
232
233 port_id = port_ops->get_id(pdev);
234 dfl_fpga_port_ops_put(port_ops);
235
236 return port_id == *(int *)pport_id;
237}
238EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
239
240
241
242
243
244void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
245{
246 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
247 struct dfl_feature *feature;
248
249 dfl_fpga_dev_for_each_feature(pdata, feature)
250 if (feature->ops) {
251 feature->ops->uinit(pdev, feature);
252 feature->ops = NULL;
253 }
254}
255EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
256
257static int dfl_feature_instance_init(struct platform_device *pdev,
258 struct dfl_feature_platform_data *pdata,
259 struct dfl_feature *feature,
260 struct dfl_feature_driver *drv)
261{
262 int ret;
263
264 ret = drv->ops->init(pdev, feature);
265 if (ret)
266 return ret;
267
268 feature->ops = drv->ops;
269
270 return ret;
271}
272
273
274
275
276
277
278
279
280
281
282
283int dfl_fpga_dev_feature_init(struct platform_device *pdev,
284 struct dfl_feature_driver *feature_drvs)
285{
286 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
287 struct dfl_feature_driver *drv = feature_drvs;
288 struct dfl_feature *feature;
289 int ret;
290
291 while (drv->ops) {
292 dfl_fpga_dev_for_each_feature(pdata, feature) {
293
294 if (feature->id == drv->id) {
295 ret = dfl_feature_instance_init(pdev, pdata,
296 feature, drv);
297 if (ret)
298 goto exit;
299 }
300 }
301 drv++;
302 }
303
304 return 0;
305exit:
306 dfl_fpga_dev_feature_uinit(pdev);
307 return ret;
308}
309EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
310
311static void dfl_chardev_uinit(void)
312{
313 int i;
314
315 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
316 if (MAJOR(dfl_chrdevs[i].devt)) {
317 unregister_chrdev_region(dfl_chrdevs[i].devt,
318 MINORMASK);
319 dfl_chrdevs[i].devt = MKDEV(0, 0);
320 }
321}
322
323static int dfl_chardev_init(void)
324{
325 int i, ret;
326
327 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
328 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK,
329 dfl_chrdevs[i].name);
330 if (ret)
331 goto exit;
332 }
333
334 return 0;
335
336exit:
337 dfl_chardev_uinit();
338 return ret;
339}
340
341static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
342{
343 if (type >= DFL_FPGA_DEVT_MAX)
344 return 0;
345
346 return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
347}
348
349
350
351
352
353
354
355
356
357
/**
 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
 * @pdev: feature dev.
 * @fops: file operations for feature dev's cdev.
 * @owner: owning module/driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * tie the cdev's kobject to the feature device so the device is
	 * kept around for as long as the cdev lives.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
377EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
378
379
380
381
382
/**
 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
 * @pdev: feature dev whose cdev was added by dfl_fpga_dev_ops_register().
 */
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_del(&pdata->cdev);
}
389EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
390
391
392
393
394
395
396
397
398
399
400
/**
 * struct build_feature_devs_info - context for enumeration, used to build
 *				    feature platform devices from a DFL.
 * @dev: device used for devm allocations during enumeration.
 * @cdev: the container device for all feature devices being built.
 * @feature_dev: the feature platform device currently under construction;
 *		 NULL when no device is in progress.
 * @ioaddr: mapped base address of the current feature device's FIU header.
 * @sub_features: list of struct dfl_feature_info collected for @feature_dev.
 * @feature_num: number of entries on @sub_features.
 */
struct build_feature_devs_info {
	struct device *dev;
	struct dfl_fpga_cdev *cdev;
	struct platform_device *feature_dev;
	void __iomem *ioaddr;
	struct list_head sub_features;
	int feature_num;
};
409
410
411
412
413
414
415
416
417
/**
 * struct dfl_feature_info - sub feature info collected during enumeration
 * @fid: feature id (see feature_id()).
 * @mmio_res: MMIO resource of this sub feature.
 * @ioaddr: mapped base address of this sub feature.
 * @node: link into build_feature_devs_info.sub_features.
 */
struct dfl_feature_info {
	u64 fid;
	struct resource mmio_res;
	void __iomem *ioaddr;
	struct list_head node;
};
424
/*
 * Link a newly added port device onto the container's port list and take
 * a device reference so the port stays valid while it is on the list
 * (dropped again in dfl_fpga_feature_devs_remove()).
 */
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
				       struct platform_device *port)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);

	mutex_lock(&cdev->lock);
	list_add(&pdata->node, &cdev->port_dev_list);
	get_device(&pdata->dev->dev);
	mutex_unlock(&cdev->lock);
}
435
436
437
438
439
440
/*
 * build_info_commit_dev - finish and register the feature device under
 * construction in @binfo.
 *
 * Builds the platform data (with one struct dfl_feature per collected sub
 * feature) and the MMIO resource table, then adds the platform device.
 * On success ownership moves to the container: ports go onto the cdev's
 * port list, anything else becomes the cdev's fme_dev; @binfo->feature_dev
 * is cleared so the next FIU starts a fresh device.
 *
 * Returns 0 if no device is under construction or on success, negative
 * error code otherwise.
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct platform_device *fdev = binfo->feature_dev;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature_info *finfo, *p;
	int ret, index = 0;

	/* nothing under construction, nothing to commit */
	if (!fdev)
		return 0;

	/*
	 * platform data is sized to carry binfo->feature_num trailing
	 * struct dfl_feature entries (pdata->features[] below).
	 */
	pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
			GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev = fdev;
	pdata->num = binfo->feature_num;
	pdata->dfl_cdev = binfo->cdev;
	mutex_init(&pdata->lock);

	/* sanity: freshly zeroed pdata must not appear disabled */
	WARN_ON(pdata->disable_count);

	fdev->dev.platform_data = pdata;

	/* each sub feature gets one MMIO resource */
	fdev->num_resources = binfo->feature_num;
	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
				 GFP_KERNEL);
	if (!fdev->resource)
		return -ENOMEM;

	/* move collected sub features into pdata and the resource table */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &pdata->features[index];

		/* save resource information for each feature */
		feature->id = finfo->fid;
		feature->resource_index = index;
		feature->ioaddr = finfo->ioaddr;
		fdev->resource[index++] = finfo->mmio_res;

		list_del(&finfo->node);
		kfree(finfo);
	}

	ret = platform_device_add(binfo->feature_dev);
	if (!ret) {
		if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
			dfl_fpga_cdev_add_port_dev(binfo->cdev,
						   binfo->feature_dev);
		else
			binfo->cdev->fme_dev =
					get_device(&binfo->feature_dev->dev);
		/*
		 * the device is now owned by the container; clear the
		 * pointer so the next parse round builds a new one.
		 */
		binfo->feature_dev = NULL;
	}

	return ret;
}
518
/*
 * build_info_create_dev - start construction of a new feature device of
 * @type whose FIU header is mapped at @ioaddr.
 *
 * Any device already under construction is committed first.  Note the
 * ordering: @binfo->feature_dev is set before the id allocation so that
 * build_info_free() can clean up correctly if anything below fails.
 */
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type, void __iomem *ioaddr)
{
	struct platform_device *fdev;
	int ret;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/* commit current feature device before creating a new one */
	ret = build_info_commit_dev(binfo);
	if (ret)
		return ret;

	/*
	 * -ENODEV is used as the initial (invalid) device id; the real
	 * id is allocated from the per-type idr below.
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	binfo->feature_dev = fdev;
	binfo->feature_num = 0;
	binfo->ioaddr = ioaddr;
	INIT_LIST_HEAD(&binfo->sub_features);

	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}
556
/*
 * Release the enumeration context: if a feature device is still under
 * construction (i.e. was never committed), free its allocated id and the
 * collected sub feature infos, then drop the device and the context.
 */
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * an id >= 0 means dfl_id_alloc() succeeded for an uncommitted
	 * device, so the id and the sub feature list are still ours.
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	/* platform_device_put() tolerates a NULL feature_dev */
	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}
579
580static inline u32 feature_size(void __iomem *start)
581{
582 u64 v = readq(start + DFH);
583 u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
584
585 return ofst ? ofst : 4096;
586}
587
588static u64 feature_id(void __iomem *start)
589{
590 u64 v = readq(start + DFH);
591 u16 id = FIELD_GET(DFH_ID, v);
592 u8 type = FIELD_GET(DFH_TYPE, v);
593
594 if (type == DFH_TYPE_FIU)
595 return FEATURE_ID_FIU_HEADER;
596 else if (type == DFH_TYPE_PRIVATE)
597 return id;
598 else if (type == DFH_TYPE_AFU)
599 return FEATURE_ID_AFU;
600
601 WARN_ON(1);
602 return 0;
603}
604
605
606
607
608
609
610
611
612static int
613create_feature_instance(struct build_feature_devs_info *binfo,
614 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
615 resource_size_t size, u64 fid)
616{
617 struct dfl_feature_info *finfo;
618
619
620 size = size ? size : feature_size(dfl->ioaddr + ofst);
621 fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
622
623 if (dfl->len - ofst < size)
624 return -EINVAL;
625
626 finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
627 if (!finfo)
628 return -ENOMEM;
629
630 finfo->fid = fid;
631 finfo->mmio_res.start = dfl->start + ofst;
632 finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
633 finfo->mmio_res.flags = IORESOURCE_MEM;
634 finfo->ioaddr = dfl->ioaddr + ofst;
635
636 list_add_tail(&finfo->node, &binfo->sub_features);
637 binfo->feature_num++;
638
639 return 0;
640}
641
642static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
643 struct dfl_fpga_enum_dfl *dfl,
644 resource_size_t ofst)
645{
646 u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
647 u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
648
649 WARN_ON(!size);
650
651 return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
652}
653
654static int parse_feature_afu(struct build_feature_devs_info *binfo,
655 struct dfl_fpga_enum_dfl *dfl,
656 resource_size_t ofst)
657{
658 if (!binfo->feature_dev) {
659 dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
660 return -EINVAL;
661 }
662
663 switch (feature_dev_id_type(binfo->feature_dev)) {
664 case PORT_ID:
665 return parse_feature_port_afu(binfo, dfl, ofst);
666 default:
667 dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
668 binfo->feature_dev->name);
669 }
670
671 return 0;
672}
673
/*
 * Parse a FIU header: start a new feature device for it, record the FIU
 * header itself as the device's first sub feature, then follow the
 * NEXT_AFU register to the FIU's child AFU (if any).
 */
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     struct dfl_fpga_enum_dfl *dfl,
			     resource_size_t ofst)
{
	u32 id, offset;
	u64 v;
	int ret = 0;

	v = readq(dfl->ioaddr + ofst + DFH);
	id = FIELD_GET(DFH_ID, v);

	/* create platform device for this FIU feature dev */
	ret = build_info_create_dev(binfo, dfh_id_to_type(id),
				    dfl->ioaddr + ofst);
	if (ret)
		return ret;

	/* the FIU header is itself the first feature of the new device */
	ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
	if (ret)
		return ret;

	/*
	 * find and parse FIU's child AFU via its NEXT_AFU register; a
	 * zero offset means there is no child AFU.
	 */
	v = readq(dfl->ioaddr + ofst + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, dfl, ofst + offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}
708
709static int parse_feature_private(struct build_feature_devs_info *binfo,
710 struct dfl_fpga_enum_dfl *dfl,
711 resource_size_t ofst)
712{
713 if (!binfo->feature_dev) {
714 dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
715 (unsigned long long)feature_id(dfl->ioaddr + ofst));
716 return -EINVAL;
717 }
718
719 return create_feature_instance(binfo, dfl, ofst, 0, 0);
720}
721
722
723
724
725
726
727
728
729static int parse_feature(struct build_feature_devs_info *binfo,
730 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
731{
732 u64 v;
733 u32 type;
734
735 v = readq(dfl->ioaddr + ofst + DFH);
736 type = FIELD_GET(DFH_TYPE, v);
737
738 switch (type) {
739 case DFH_TYPE_AFU:
740 return parse_feature_afu(binfo, dfl, ofst);
741 case DFH_TYPE_PRIVATE:
742 return parse_feature_private(binfo, dfl, ofst);
743 case DFH_TYPE_FIU:
744 return parse_feature_fiu(binfo, dfl, ofst);
745 default:
746 dev_info(binfo->dev,
747 "Feature Type %x is not supported.\n", type);
748 }
749
750 return 0;
751}
752
/*
 * Walk one Device Feature List: parse each DFH in turn, stepping by the
 * next-header offset, until the EOL flag is set, the offset is zero, or
 * the region is exhausted.  The last feature device under construction
 * is committed at the end of the walk.
 */
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      struct dfl_fpga_enum_dfl *dfl)
{
	void __iomem *start = dfl->ioaddr;
	void __iomem *end = dfl->ioaddr + dfl->len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	/* walk through the device feature list via DFH's next DFH offset */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
		if (ret)
			return ret;

		v = readq(start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL(End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reaching the end of list */
	return build_info_commit_dev(binfo);
}
784
/**
 * dfl_fpga_enum_info_alloc - allocate enumeration info
 * @dev: parent device; a reference is taken and held until
 *	 dfl_fpga_enum_info_free().
 *
 * Return: the allocated info, or NULL on allocation failure (the device
 * reference is dropped again in that case).
 */
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
{
	struct dfl_fpga_enum_info *info;

	get_device(dev);

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		put_device(dev);
		return NULL;
	}

	info->dev = dev;
	INIT_LIST_HEAD(&info->dfls);

	return info;
}
802EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
803
804void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
805{
806 struct dfl_fpga_enum_dfl *tmp, *dfl;
807 struct device *dev;
808
809 if (!info)
810 return;
811
812 dev = info->dev;
813
814
815 list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
816 list_del(&dfl->node);
817 devm_kfree(dev, dfl);
818 }
819
820 devm_kfree(dev, info);
821 put_device(dev);
822}
823EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
840 resource_size_t start, resource_size_t len,
841 void __iomem *ioaddr)
842{
843 struct dfl_fpga_enum_dfl *dfl;
844
845 dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
846 if (!dfl)
847 return -ENOMEM;
848
849 dfl->start = start;
850 dfl->len = len;
851 dfl->ioaddr = ioaddr;
852
853 list_add_tail(&dfl->node, &info->dfls);
854
855 return 0;
856}
857EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
858
/* device_for_each_child() callback: unregister one feature dev */
static int remove_feature_dev(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	/*
	 * cache type and id up front - unregistering may drop the last
	 * reference to pdev, so it must not be touched afterwards.
	 */
	enum dfl_id_type type = feature_dev_id_type(pdev);
	int id = pdev->id;

	platform_device_unregister(pdev);

	dfl_id_free(type, id);

	return 0;
}
871
/* unregister all feature devs that were added under the container's region */
static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
{
	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
}
876
877
878
879
880
881
882
883
884
885
886
/**
 * dfl_fpga_feature_devs_enumerate - enumerate feature devices from the
 *				     device feature lists in @info
 * @info: enumeration info filled via dfl_fpga_enum_info_add_dfl().
 *
 * Creates the container device (with an fpga region) and builds one
 * platform device per FIU found while parsing each DFL.
 *
 * Return: the container device on success, ERR_PTR() on failure.
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	cdev->region = fpga_region_create(info->dev, NULL, NULL);
	if (!cdev->region) {
		ret = -ENOMEM;
		goto free_cdev_exit;
	}

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	ret = fpga_region_register(cdev->region);
	if (ret)
		goto free_region_exit;

	/* create the enumeration context used while parsing the DFLs */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	binfo->dev = info->dev;
	binfo->cdev = cdev;

	/*
	 * parse every DFL; on failure, devices already created by
	 * earlier lists must be removed before unwinding the region.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl);
		if (ret) {
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_region_exit:
	fpga_region_free(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
951EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
952
953
954
955
956
957
958
959
/**
 * dfl_fpga_feature_devs_remove - remove all feature devs under the container
 * @cdev: container device returned by dfl_fpga_feature_devs_enumerate().
 *
 * Unregisters every feature dev, drops the references the container held
 * on the fme and port devices, and tears down the fpga region.
 */
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata, *ptmp;

	remove_feature_devs(cdev);

	mutex_lock(&cdev->lock);
	if (cdev->fme_dev) {
		/* the fme device must already be unregistered by now */
		WARN_ON(device_is_registered(cdev->fme_dev));
		put_device(cdev->fme_dev);
	}

	list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
		struct platform_device *port_dev = pdata->dev;

		/* the port device must already be unregistered by now */
		WARN_ON(device_is_registered(&port_dev->dev));
		list_del(&pdata->node);
		put_device(&port_dev->dev);
	}
	mutex_unlock(&cdev->lock);

	fpga_region_unregister(cdev->region);
	devm_kfree(cdev->parent, cdev);
}
986EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
/**
 * __dfl_fpga_cdev_find_port - find a port which matches the given data
 * @cdev: container device.
 * @data: opaque data passed through to @match.
 * @match: match function; non-zero return means the port matches.
 *
 * Return: the matched port's platform device with its reference count
 * raised (caller must put_device() it after use), or NULL if no port
 * matched.
 *
 * NOTE(review): the port list is walked without taking cdev->lock here,
 * so the caller presumably must hold cdev->lock - confirm at call sites.
 */
struct platform_device *
__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
			  int (*match)(struct platform_device *, void *))
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_dev;

	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		port_dev = pdata->dev;

		if (match(port_dev, data) && get_device(&port_dev->dev))
			return port_dev;
	}

	return NULL;
}
1018EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
1019
1020static int __init dfl_fpga_init(void)
1021{
1022 int ret;
1023
1024 dfl_ids_init();
1025
1026 ret = dfl_chardev_init();
1027 if (ret)
1028 dfl_ids_destroy();
1029
1030 return ret;
1031}
1032
/* module exit: release char dev regions first, then the id allocators */
static void __exit dfl_fpga_exit(void)
{
	dfl_chardev_uinit();
	dfl_ids_destroy();
}
1038
/* module entry/exit points and metadata */
module_init(dfl_fpga_init);
module_exit(dfl_fpga_exit);

MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
1045