1
2
3
4
5
6
7
8
9#include <linux/firmware/xlnx-zynqmp.h>
10#include <linux/interrupt.h>
11#include <linux/kernel.h>
12#include <linux/list.h>
13#include <linux/mailbox_client.h>
14#include <linux/mailbox/zynqmp-ipi-message.h>
15#include <linux/module.h>
16#include <linux/of_address.h>
17#include <linux/of_platform.h>
18#include <linux/of_reserved_mem.h>
19#include <linux/platform_device.h>
20#include <linux/remoteproc.h>
21#include <linux/skbuff.h>
22#include <linux/sysfs.h>
23
24#include "remoteproc_internal.h"
25
/* Maximum number of R5 cores managed as one cluster */
#define MAX_RPROCS 2
/* DT property on a core node listing its SRAM/TCM bank phandles */
#define BANK_LIST_PROP "sram"
/* DT property on a core node listing reserved DDR memory regions */
#define DDR_LIST_PROP "memory-region"


/* Maximum IPI message payload, in bytes */
#define IPI_BUF_LEN_MAX 32U

/* RX buffer: IPI message header plus maximum payload */
#define RX_MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
				sizeof(struct zynqmp_ipi_message))
35
36
37
38
39
/**
 * struct sram_addr_data - maps an SRAM bank base address to its PM node id
 * @addr: physical base address of the bank
 * @id: power-management node id used with the Xilinx firmware API
 */
struct sram_addr_data {
	phys_addr_t addr;
	enum pm_node_id id;
};
44
/* Number of entries in the zynqmp_banks lookup table below */
#define NUM_SRAMS 8U
/*
 * Base address -> PM node id for each OCM bank and TCM bank on ZynqMP.
 * Used to translate an "sram" phandle's address into a firmware node id.
 */
static const struct sram_addr_data zynqmp_banks[NUM_SRAMS] = {
	{0xfffc0000UL, NODE_OCM_BANK_0},
	{0xfffd0000UL, NODE_OCM_BANK_1},
	{0xfffe0000UL, NODE_OCM_BANK_2},
	{0xffff0000UL, NODE_OCM_BANK_3},
	{0xffe00000UL, NODE_TCM_0_A},
	{0xffe20000UL, NODE_TCM_0_B},
	{0xffe90000UL, NODE_TCM_1_A},
	{0xffeb0000UL, NODE_TCM_1_B},
};
56
/*
 * Translate ZynqMP TCM/OCM/RPU node ids to their Versal counterparts.
 * NOTE(review): the fixed offsets presumably map onto the Versal firmware
 * node-id numbering — confirm against the Versal PM node id definitions.
 */
#define VERSAL_TCM(ID) ((ID) + 0x18317FFCU)
#define VERSAL_OCM(ID) ((ID) + 0x18313FFCU)
#define VERSAL_RPU_0 (NODE_RPU_0 + 0x1810FFFEU)
#define VERSAL_RPU_1 (VERSAL_RPU_0 + 1U)
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
/**
 * struct zynqmp_r5_rproc - per-core remoteproc driver state
 * @rx_mc_buf: buffer the rx mailbox callback copies incoming messages into
 * @tx_mc: mailbox client for the tx channel
 * @rx_mc: mailbox client for the rx channel
 * @mbox_work: work scheduled from the rx mailbox callback
 * @tx_mc_skbs: skbs holding in-flight tx messages, freed on tx_done
 * @dev: child platform device representing this R5 core
 * @rproc: remoteproc instance for this core
 * @tx_chan: tx mailbox channel (NULL when no "mboxes" property)
 * @rx_chan: rx mailbox channel (NULL when no "mboxes" property)
 * @pnode_id: power-management node id of this core
 * @elem: entry in the cluster list stored as platform driver data
 * @versal: true when @pnode_id identifies a Versal RPU
 */
struct zynqmp_r5_rproc {
	unsigned char rx_mc_buf[RX_MBOX_CLIENT_BUF_MAX];
	struct mbox_client tx_mc;
	struct mbox_client rx_mc;
	struct work_struct mbox_work;
	struct sk_buff_head tx_mc_skbs;
	struct device *dev;
	struct rproc *rproc;
	struct mbox_chan *tx_chan;
	struct mbox_chan *rx_chan;
	u32 pnode_id;
	struct list_head elem;
	bool versal;
};
92
93
94
95
96
97
98
99
100
101
102static int r5_set_mode(struct zynqmp_r5_rproc *z_rproc,
103 enum rpu_oper_mode rpu_mode)
104{
105 enum rpu_tcm_comb tcm_mode;
106 enum rpu_oper_mode cur_rpu_mode;
107 int ret;
108
109 ret = zynqmp_pm_get_rpu_mode(z_rproc->pnode_id, &cur_rpu_mode);
110 if (ret < 0)
111 return ret;
112
113 if (rpu_mode != cur_rpu_mode) {
114 ret = zynqmp_pm_set_rpu_mode(z_rproc->pnode_id,
115 rpu_mode);
116 if (ret < 0)
117 return ret;
118 }
119
120 tcm_mode = (rpu_mode == PM_RPU_MODE_LOCKSTEP) ?
121 PM_RPU_TCM_COMB : PM_RPU_TCM_SPLIT;
122 return zynqmp_pm_set_tcm_config(z_rproc->pnode_id, tcm_mode);
123}
124
125
126
127
128
129
130
131
132
133
134static int sram_mem_release(struct rproc *rproc, struct rproc_mem_entry *mem)
135{
136 u32 pnode_id = (u64)mem->priv;
137
138 iounmap(mem->va);
139 return zynqmp_pm_release_node(pnode_id);
140}
141
142
143
144
145
146
147
148
149
150static int zynqmp_r5_rproc_start(struct rproc *rproc)
151{
152 struct zynqmp_r5_rproc *z_rproc = rproc->priv;
153 enum rpu_boot_mem bootmem;
154
155 bootmem = (rproc->bootaddr & 0xF0000000) == 0xF0000000 ?
156 PM_RPU_BOOTMEM_HIVEC : PM_RPU_BOOTMEM_LOVEC;
157
158 dev_dbg(rproc->dev.parent, "RPU boot from %s.",
159 bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
160
161 return zynqmp_pm_request_wake(z_rproc->pnode_id, 1,
162 bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
163}
164
165
166
167
168
169
170
171
172
173static int zynqmp_r5_rproc_stop(struct rproc *rproc)
174{
175 struct zynqmp_r5_rproc *z_rproc = rproc->priv;
176
177 return zynqmp_pm_force_pwrdwn(z_rproc->pnode_id,
178 ZYNQMP_PM_REQUEST_ACK_BLOCKING);
179}
180
181
182
183
184
185
186
187
188
189
190static int zynqmp_r5_rproc_mem_alloc(struct rproc *rproc,
191 struct rproc_mem_entry *mem)
192{
193 void *va;
194
195 va = ioremap_wc(mem->dma, mem->len);
196 if (IS_ERR_OR_NULL(va))
197 return -ENOMEM;
198
199 mem->va = va;
200
201 return 0;
202}
203
204
205
206
207
208
209
210
211
212
213static int zynqmp_r5_rproc_mem_release(struct rproc *rproc,
214 struct rproc_mem_entry *mem)
215{
216 iounmap(mem->va);
217 return 0;
218}
219
220
221
222
223
224
225
226
227
228
229static int parse_mem_regions(struct rproc *rproc)
230{
231 int num_mems, i;
232 struct zynqmp_r5_rproc *z_rproc = rproc->priv;
233 struct device *dev = &rproc->dev;
234 struct device_node *np = z_rproc->dev->of_node;
235 struct rproc_mem_entry *mem;
236
237 num_mems = of_count_phandle_with_args(np, DDR_LIST_PROP, NULL);
238 if (num_mems <= 0)
239 return 0;
240
241 for (i = 0; i < num_mems; i++) {
242 struct device_node *node;
243 struct reserved_mem *rmem;
244
245 node = of_parse_phandle(np, DDR_LIST_PROP, i);
246 if (!node)
247 return -EINVAL;
248
249 rmem = of_reserved_mem_lookup(node);
250 if (!rmem)
251 return -EINVAL;
252
253 if (strstr(node->name, "vdev0vring")) {
254 int vring_id;
255 char name[16];
256
257
258
259
260
261 if (strlen(node->name) < 15) {
262 dev_err(dev, "%pOF is less than 14 chars",
263 node);
264 return -EINVAL;
265 }
266
267
268
269
270
271 vring_id = node->name[14] - '0';
272 snprintf(name, sizeof(name), "vdev0vring%d", vring_id);
273
274 mem = rproc_mem_entry_init(dev, NULL,
275 (dma_addr_t)rmem->base,
276 rmem->size, rmem->base,
277 zynqmp_r5_rproc_mem_alloc,
278 zynqmp_r5_rproc_mem_release,
279 name);
280 } else {
281
282 int (*alloc)(struct rproc *r,
283 struct rproc_mem_entry *rme);
284 int (*release)(struct rproc *r,
285 struct rproc_mem_entry *rme);
286 char name[20];
287
288 if (strstr(node->name, "vdev0buffer")) {
289 alloc = NULL;
290 release = NULL;
291 strcpy(name, "vdev0buffer");
292 } else {
293 alloc = zynqmp_r5_rproc_mem_alloc;
294 release = zynqmp_r5_rproc_mem_release;
295 strcpy(name, node->name);
296 }
297
298 mem = rproc_mem_entry_init(dev, NULL,
299 (dma_addr_t)rmem->base,
300 rmem->size, rmem->base,
301 alloc, release, name);
302 }
303 if (!mem)
304 return -ENOMEM;
305
306 rproc_add_carveout(rproc, mem);
307 }
308
309 return 0;
310}
311
312
313
314
315
316
317
318
319
320
321
322
323
324static int zynqmp_r5_pm_request_sram(phys_addr_t addr, bool versal,
325 u32 *pnode_id)
326{
327 unsigned int i;
328
329 for (i = 0; i < NUM_SRAMS; i++) {
330 if (zynqmp_banks[i].addr == addr) {
331 *pnode_id = zynqmp_banks[i].id;
332
333 if (versal) {
334 switch (addr) {
335 case 0xffe00000UL:
336 case 0xffe20000UL:
337 case 0xffe90000UL:
338 case 0xffeb0000UL:
339 *pnode_id = VERSAL_TCM(zynqmp_banks[i].id);
340 break;
341 case 0xfffc0000UL:
342 case 0xfffd0000UL:
343 case 0xfffe0000UL:
344 case 0xffff0000UL:
345 *pnode_id = VERSAL_OCM(zynqmp_banks[i].id);
346 break;
347 default:
348 return -EINVAL;
349 }
350 }
351
352 return zynqmp_pm_request_node(*pnode_id,
353 ZYNQMP_PM_CAPABILITY_ACCESS,
354 0,
355 ZYNQMP_PM_REQUEST_ACK_BLOCKING);
356 }
357 }
358
359 return -EINVAL;
360}
361
362
363
364
365
366
367
368
369
370
371
372
373static int sram_mem_alloc(struct rproc *rproc, struct rproc_mem_entry *mem)
374{
375 void *va;
376 struct device *dev = rproc->dev.parent;
377
378 va = ioremap_wc(mem->dma, mem->len);
379 if (IS_ERR_OR_NULL(va))
380 return -ENOMEM;
381
382
383 mem->va = va;
384
385 va = devm_ioremap_wc(dev, mem->da, mem->len);
386 if (!va)
387 return -ENOMEM;
388
389 if (mem->da >= 0xffe00000UL && mem->da <= 0xffeb0000UL) {
390
391 mem->da &= 0x000fffff;
392
393
394
395
396
397
398
399
400
401
402 if (mem->da == 0x90000 || mem->da == 0xB0000)
403 mem->da -= 0x90000;
404
405
406 if (mem->da != 0x0 && mem->da != 0x20000) {
407 dev_err(dev, "invalid TCM bank address: %x\n", mem->da);
408 return -EINVAL;
409 }
410 }
411
412 return 0;
413}
414
415
416
417
418
419
420
421
422
423
424
425static int parse_tcm_banks(struct rproc *rproc)
426{
427 int i, num_banks;
428 struct zynqmp_r5_rproc *z_rproc = rproc->priv;
429 struct device *dev = &rproc->dev;
430 struct device_node *r5_node = z_rproc->dev->of_node;
431
432
433 num_banks = of_count_phandle_with_args(r5_node, BANK_LIST_PROP, NULL);
434 if (num_banks <= 0) {
435 dev_err(dev, "need to specify TCM banks\n");
436 return -EINVAL;
437 }
438 for (i = 0; i < num_banks; i++) {
439 struct resource rsc;
440 resource_size_t size;
441 struct device_node *dt_node;
442 struct rproc_mem_entry *mem;
443 int ret;
444 u32 pnode_id;
445
446 dt_node = of_parse_phandle(r5_node, BANK_LIST_PROP, i);
447 if (!dt_node)
448 return -EINVAL;
449
450 if (of_device_is_available(dt_node)) {
451 ret = of_address_to_resource(dt_node, 0, &rsc);
452 if (ret < 0)
453 return ret;
454 ret = zynqmp_r5_pm_request_sram(rsc.start,
455 z_rproc->versal,
456 &pnode_id);
457 if (ret < 0)
458 return ret;
459
460
461 size = resource_size(&rsc);
462 mem = rproc_mem_entry_init(dev, NULL, rsc.start,
463 (int)size, rsc.start,
464 sram_mem_alloc,
465 sram_mem_release,
466 rsc.name);
467 if (!mem)
468 return -ENOMEM;
469
470 mem->priv = (void *)(u64)pnode_id;
471 rproc_add_carveout(rproc, mem);
472 }
473 }
474
475 return 0;
476}
477
478
479
480
481
482
483
484
485
486
487static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
488{
489 int ret;
490
491 ret = parse_tcm_banks(rproc);
492 if (ret)
493 return ret;
494
495 ret = parse_mem_regions(rproc);
496 if (ret)
497 return ret;
498
499 ret = rproc_elf_load_rsc_table(rproc, fw);
500 if (ret == -EINVAL) {
501
502
503
504
505
506
507 dev_info(&rproc->dev, "no resource table found.\n");
508 ret = 0;
509 }
510 return ret;
511}
512
513
514
515
516
517
518static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid)
519{
520 struct sk_buff *skb = NULL;
521 unsigned int skb_len = 0;
522 struct zynqmp_ipi_message *mb_msg = NULL;
523 int ret = 0;
524
525 struct device *dev = rproc->dev.parent;
526 struct zynqmp_r5_rproc *z_rproc = rproc->priv;
527
528 if (of_property_read_bool(dev->of_node, "mboxes")) {
529 skb_len = (unsigned int)(sizeof(vqid) + sizeof(mb_msg));
530 skb = alloc_skb(skb_len, GFP_ATOMIC);
531 if (!skb)
532 return;
533
534 mb_msg = (struct zynqmp_ipi_message *)skb_put(skb, skb_len);
535 mb_msg->len = sizeof(vqid);
536 memcpy(mb_msg->data, &vqid, sizeof(vqid));
537
538 skb_queue_tail(&z_rproc->tx_mc_skbs, skb);
539 ret = mbox_send_message(z_rproc->tx_chan, mb_msg);
540 if (ret < 0) {
541 dev_warn(dev, "Failed to kick remote.\n");
542 skb_dequeue_tail(&z_rproc->tx_mc_skbs);
543 kfree_skb(skb);
544 }
545 } else {
546 (void)skb;
547 (void)skb_len;
548 (void)mb_msg;
549 (void)ret;
550 (void)vqid;
551 }
552}
553
/* remoteproc ops: ELF handling delegates to the generic rproc_elf_* helpers */
static struct rproc_ops zynqmp_r5_rproc_ops = {
	.start = zynqmp_r5_rproc_start,
	.stop = zynqmp_r5_rproc_stop,
	.load = rproc_elf_load_segments,
	.parse_fw = zynqmp_r5_parse_fw,
	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
	.sanity_check = rproc_elf_sanity_check,
	.get_boot_addr = rproc_elf_get_boot_addr,
	.kick = zynqmp_r5_rproc_kick,
};
564
565
566
567
568
569
570
571
572
573
574
575
/*
 * event_notified_idr_cb - per-notifyid callback for a mailbox notification
 * @id: notify id registered for a virtqueue
 * @ptr: IDR payload (unused)
 * @data: the rproc instance
 *
 * Signals the virtqueue matching @id; the return value of
 * rproc_vq_interrupt() is deliberately ignored so iteration continues.
 *
 * Return: always 0 to keep idr_for_each() walking.
 */
static int event_notified_idr_cb(int id, void *ptr, void *data)
{
	(void)rproc_vq_interrupt((struct rproc *)data, id);

	return 0;
}
583
584
585
586
587
588
589
590static void handle_event_notified(struct work_struct *work)
591{
592 struct rproc *rproc;
593 struct zynqmp_r5_rproc *z_rproc;
594
595 z_rproc = container_of(work, struct zynqmp_r5_rproc, mbox_work);
596
597 (void)mbox_send_message(z_rproc->rx_chan, NULL);
598 rproc = z_rproc->rproc;
599
600
601
602
603
604 idr_for_each(&rproc->notifyids, event_notified_idr_cb, rproc);
605}
606
607
608
609
610
611
612
613
614static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *msg)
615{
616 struct zynqmp_r5_rproc *z_rproc;
617
618 z_rproc = container_of(cl, struct zynqmp_r5_rproc, rx_mc);
619 if (msg) {
620 struct zynqmp_ipi_message *ipi_msg, *buf_msg;
621 size_t len;
622
623 ipi_msg = (struct zynqmp_ipi_message *)msg;
624 buf_msg = (struct zynqmp_ipi_message *)z_rproc->rx_mc_buf;
625 len = (ipi_msg->len >= IPI_BUF_LEN_MAX) ?
626 IPI_BUF_LEN_MAX : ipi_msg->len;
627 buf_msg->len = len;
628 memcpy(buf_msg->data, ipi_msg->data, len);
629 }
630 schedule_work(&z_rproc->mbox_work);
631}
632
633
634
635
636
637
638
639
640
641static void zynqmp_r5_mb_tx_done(struct mbox_client *cl, void *msg, int r)
642{
643 struct zynqmp_r5_rproc *z_rproc;
644 struct sk_buff *skb;
645
646 if (!msg)
647 return;
648 z_rproc = container_of(cl, struct zynqmp_r5_rproc, tx_mc);
649 skb = skb_dequeue(&z_rproc->tx_mc_skbs);
650 kfree_skb(skb);
651}
652
653
654
655
656
657
658
659
660
661
662
663
664static int zynqmp_r5_setup_mbox(struct zynqmp_r5_rproc *z_rproc,
665 struct device_node *node)
666{
667 struct mbox_client *mclient;
668
669
670 mclient = &z_rproc->tx_mc;
671 mclient->rx_callback = NULL;
672 mclient->tx_block = false;
673 mclient->knows_txdone = false;
674 mclient->tx_done = zynqmp_r5_mb_tx_done;
675 mclient->dev = z_rproc->dev;
676
677
678 mclient = &z_rproc->rx_mc;
679 mclient->dev = z_rproc->dev;
680 mclient->rx_callback = zynqmp_r5_mb_rx_cb;
681 mclient->tx_block = false;
682 mclient->knows_txdone = false;
683
684 INIT_WORK(&z_rproc->mbox_work, handle_event_notified);
685
686
687 z_rproc->tx_chan = mbox_request_channel_byname(&z_rproc->tx_mc, "tx");
688 if (IS_ERR(z_rproc->tx_chan)) {
689 dev_err(z_rproc->dev, "failed to request mbox tx channel.\n");
690 z_rproc->tx_chan = NULL;
691 return -EINVAL;
692 }
693
694 z_rproc->rx_chan = mbox_request_channel_byname(&z_rproc->rx_mc, "rx");
695 if (IS_ERR(z_rproc->rx_chan)) {
696 dev_err(z_rproc->dev, "failed to request mbox rx channel.\n");
697 z_rproc->rx_chan = NULL;
698 return -EINVAL;
699 }
700 skb_queue_head_init(&z_rproc->tx_mc_skbs);
701
702 return 0;
703}
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719static int zynqmp_r5_probe(struct platform_device *pdev,
720 struct device_node *node,
721 enum rpu_oper_mode rpu_mode,
722 struct zynqmp_r5_rproc **z_rproc)
723{
724 int ret;
725 struct device *dev = &pdev->dev;
726 struct rproc *rproc_ptr;
727
728
729 rproc_ptr = devm_rproc_alloc(dev, dev_name(dev), &zynqmp_r5_rproc_ops,
730 NULL, sizeof(struct zynqmp_r5_rproc));
731 if (!rproc_ptr) {
732 ret = -ENOMEM;
733 goto error;
734 }
735
736 rproc_ptr->auto_boot = false;
737 *z_rproc = rproc_ptr->priv;
738 (*z_rproc)->rproc = rproc_ptr;
739 (*z_rproc)->dev = dev;
740
741 ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
742 if (ret)
743 goto error;
744
745
746 ret = of_property_read_u32(node, "power-domain", &(*z_rproc)->pnode_id);
747 if (ret)
748 goto error;
749
750 if ((VERSAL_RPU_0 == (*z_rproc)->pnode_id) ||
751 (VERSAL_RPU_1 == (*z_rproc)->pnode_id))
752 (*z_rproc)->versal = true;
753
754 ret = r5_set_mode(*z_rproc, rpu_mode);
755 if (ret)
756 goto error;
757
758 if (of_property_read_bool(node, "mboxes")) {
759 ret = zynqmp_r5_setup_mbox(*z_rproc, node);
760 if (ret)
761 goto error;
762 }
763
764
765 ret = devm_rproc_add(dev, rproc_ptr);
766 if (ret)
767 goto error;
768
769
770
771
772
773
774
775
776
777
778 if ((*z_rproc)->versal) {
779 ret = zynqmp_pm_request_node((*z_rproc)->pnode_id,
780 ZYNQMP_PM_CAPABILITY_ACCESS, 0,
781 ZYNQMP_PM_REQUEST_ACK_BLOCKING);
782 if (ret < 0)
783 goto error;
784 }
785
786 return 0;
787error:
788 *z_rproc = NULL;
789 return ret;
790}
791
792
793
794
795
796
797
798
799
800
801
802static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
803{
804 int ret, core_count;
805 struct device *dev = &pdev->dev;
806 struct device_node *nc;
807 enum rpu_oper_mode rpu_mode = PM_RPU_MODE_LOCKSTEP;
808 struct list_head *cluster;
809 struct zynqmp_r5_rproc *z_rproc = NULL;
810 struct platform_device *child_pdev;
811 struct list_head *pos;
812
813 ret = of_property_read_u32(dev->of_node, "xlnx,cluster-mode", &rpu_mode);
814 if (ret < 0 || (rpu_mode != PM_RPU_MODE_LOCKSTEP &&
815 rpu_mode != PM_RPU_MODE_SPLIT)) {
816 dev_err(dev, "invalid format cluster mode: ret %d mode %x\n",
817 ret, rpu_mode);
818 return ret;
819 }
820
821 dev_dbg(dev, "RPU configuration: %s\n",
822 rpu_mode == PM_RPU_MODE_LOCKSTEP ? "lockstep" : "split");
823
824
825
826
827
828
829 core_count = of_get_available_child_count(dev->of_node);
830 if ((rpu_mode == PM_RPU_MODE_LOCKSTEP && core_count != 1) ||
831 core_count > MAX_RPROCS)
832 return -EINVAL;
833
834 cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
835 if (!cluster)
836 return -ENOMEM;
837 INIT_LIST_HEAD(cluster);
838
839 ret = devm_of_platform_populate(dev);
840 if (ret) {
841 dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
842 ret);
843 return ret;
844 }
845
846
847 for_each_available_child_of_node(dev->of_node, nc) {
848 child_pdev = of_find_device_by_node(nc);
849 if (!child_pdev) {
850 dev_err(dev, "could not get R5 core platform device\n");
851 ret = -ENODEV;
852 goto out;
853 }
854
855 ret = zynqmp_r5_probe(child_pdev, nc, rpu_mode, &z_rproc);
856 dev_dbg(dev, "%s to probe rpu %pOF\n",
857 ret ? "Failed" : "Able",
858 nc);
859 if (!z_rproc)
860 ret = -EINVAL;
861 if (ret)
862 goto out;
863 list_add_tail(&z_rproc->elem, cluster);
864 }
865
866 platform_set_drvdata(pdev, cluster);
867 return 0;
868out:
869
870
871
872
873
874
875 if (ret && !z_rproc && rpu_mode == PM_RPU_MODE_SPLIT &&
876 !list_empty(cluster)) {
877 list_for_each(pos, cluster) {
878 z_rproc = list_entry(pos, struct zynqmp_r5_rproc, elem);
879 if (of_property_read_bool(z_rproc->dev->of_node, "mboxes")) {
880 mbox_free_channel(z_rproc->tx_chan);
881 mbox_free_channel(z_rproc->rx_chan);
882 }
883 }
884 }
885 return ret;
886}
887
888
889
890
891
892
893
894
895
896static int zynqmp_r5_remoteproc_remove(struct platform_device *pdev)
897{
898 struct list_head *pos, *temp, *cluster = (struct list_head *)
899 platform_get_drvdata(pdev);
900 struct zynqmp_r5_rproc *z_rproc = NULL;
901
902 list_for_each_safe(pos, temp, cluster) {
903 z_rproc = list_entry(pos, struct zynqmp_r5_rproc, elem);
904
905
906
907
908
909
910 if (z_rproc->versal)
911 zynqmp_pm_release_node(z_rproc->pnode_id);
912
913 if (of_property_read_bool(z_rproc->dev->of_node, "mboxes")) {
914 mbox_free_channel(z_rproc->tx_chan);
915 mbox_free_channel(z_rproc->rx_chan);
916 }
917 list_del(pos);
918 }
919 return 0;
920}
921
922
/* DT match table: this driver binds to the R5 cluster node */
static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
	{ .compatible = "xlnx,zynqmp-r5-remoteproc", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);

static struct platform_driver zynqmp_r5_remoteproc_driver = {
	.probe = zynqmp_r5_remoteproc_probe,
	.remove = zynqmp_r5_remoteproc_remove,
	.driver = {
		.name = "zynqmp_r5_remoteproc",
		.of_match_table = zynqmp_r5_remoteproc_match,
	},
};
module_platform_driver(zynqmp_r5_remoteproc_driver);

MODULE_AUTHOR("Ben Levinsky <ben.levinsky@xilinx.com>");
MODULE_LICENSE("GPL v2");
941