// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 R5F (MCU) Remote Processor driver
 *
 * Copyright (C) Texas Instruments Incorporated - https://www.ti.com/
 *	Suman Anna <s-anna@ti.com>
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"

/* This address can either be for ATCM or BTCM with the other at address 0x0 */
#define K3_R5_TCM_DEV_ADDR	0x41010000

/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN			0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN			0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP			0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT			0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN			0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE		0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN			0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN			0x00002000
/* Memory initialization disable flag, used on TCM ECC auto-init capable SoCs */
#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS		0x00004000

/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT		0x00000001

/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE			0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI			0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED		0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED	0x00000100
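
/**
 * struct k3_r5_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address of the memory region from remote processor view
 * @size: Size of the memory region
 */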
struct k3_r5_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,
	CLUSTER_MODE_LOCKSTEP,
};
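
/**
 * struct k3_r5_soc_data - match data to handle SoC variations
 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
 */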
struct k3_r5_soc_data {
	bool tcm_is_double;
	bool tcm_ecc_autoinit;
};
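
/**
 * struct k3_r5_cluster - K3 R5F Cluster structure
 * @dev: cached device pointer
 * @mode: Mode to configure the Cluster - Split or LockStep
 * @cores: list of R5 cores within the cluster
 * @soc_data: SoC-specific feature data for the R5FSS
 */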
struct k3_r5_cluster {
	struct device *dev;
	enum cluster_mode mode;
	struct list_head cores;
	const struct k3_r5_soc_data *soc_data;
};
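
/**
 * struct k3_r5_core - K3 R5 core structure
 * @elem: linked list item
 * @dev: cached device pointer
 * @rproc: rproc handle representing this core
 * @mem: internal memory regions data
 * @sram: on-chip SRAM memory regions data
 * @num_mems: number of internal memory regions
 * @num_sram: number of on-chip SRAM memory regions
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 */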
struct k3_r5_core {
	struct list_head elem;
	struct device *dev;
	struct rproc *rproc;
	struct k3_r5_mem *mem;
	struct k3_r5_mem *sram;
	int num_mems;
	int num_sram;
	struct reset_control *reset;
	struct ti_sci_proc *tsp;
	const struct ti_sci_handle *ti_sci;
	u32 ti_sci_id;
	u32 atcm_enable;
	u32 btcm_enable;
	u32 loczrama;
};
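
/**
 * struct k3_r5_rproc - K3 remote processor state
 * @dev: cached device pointer
 * @cluster: cached pointer to parent cluster structure
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 * @rproc: rproc handle
 * @core: cached pointer to r5 core structure being used
 * @rmem: reserved memory regions data
 * @num_rmems: number of reserved memory regions
 */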
struct k3_r5_rproc {
	struct device *dev;
	struct k3_r5_cluster *cluster;
	struct mbox_chan *mbox;
	struct mbox_client client;
	struct rproc *rproc;
	struct k3_r5_core *core;
	struct k3_r5_mem *rmem;
	int num_rmems;
};
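
/*
 * Inbound mailbox message handler: the mailbox payload usually carries the
 * index of the virtqueue kicked by the remote processor and is handed to the
 * remoteproc core. A few out-of-band values (crash notification, echo reply)
 * are handled here directly; their values are deliberately large so they do
 * not collide with virtqueue indices.
 */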
static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
						 client);
	struct device *dev = kproc->rproc->dev.parent;
	const char *name = kproc->rproc->name;
	u32 msg = omap_mbox_message(data);

	dev_dbg(dev, "mbox msg: 0x%x\n", msg);

	switch (msg) {
	case RP_MBOX_CRASH:
		/*
		 * remoteproc detected an exception, but error recovery is not
		 * supported. So, just log this for now
		 */
		dev_err(dev, "K3 R5F rproc %s crashed\n", name);
		break;
	case RP_MBOX_ECHO_REPLY:
		dev_info(dev, "received echo reply from %s\n", name);
		break;
	default:
		/* silently handle all other valid messages */
		if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
			return;
		if (msg > kproc->rproc->max_notifyid) {
			dev_dbg(dev, "dropping unknown message 0x%x", msg);
			return;
		}
		/* msg contains the index of the triggered vring */
		if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
	}
}

/* kick a virtqueue */
static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct device *dev = rproc->dev.parent;
	mbox_msg_t msg = (mbox_msg_t)vqid;
	int ret;

	/* send the index of the triggered virtqueue in the mailbox payload */
	ret = mbox_send_message(kproc->mbox, (void *)msg);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n",
			ret);
}

static int k3_r5_split_reset(struct k3_r5_core *core)
{
	int ret;

	ret = reset_control_assert(core->reset);
	if (ret) {
		dev_err(core->dev, "local-reset assert failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
						   core->ti_sci_id);
	if (ret) {
		dev_err(core->dev, "module-reset assert failed, ret = %d\n",
			ret);
		if (reset_control_deassert(core->reset))
			dev_warn(core->dev, "local-reset deassert back failed\n");
	}

	return ret;
}

static int k3_r5_split_release(struct k3_r5_core *core)
{
	int ret;

	ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
						   core->ti_sci_id);
	if (ret) {
		dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = reset_control_deassert(core->reset);
	if (ret) {
		dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
			ret);
		if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
							 core->ti_sci_id))
			dev_warn(core->dev, "module-reset assert back failed\n");
	}

	return ret;
}

static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
{
	struct k3_r5_core *core;
	int ret;

	/* assert local reset on all applicable cores */
	list_for_each_entry(core, &cluster->cores, elem) {
		ret = reset_control_assert(core->reset);
		if (ret) {
			dev_err(core->dev, "local-reset assert failed, ret = %d\n",
				ret);
			core = list_prev_entry(core, elem);
			goto unroll_local_reset;
		}
	}

	/* disable the PSC modules (module reset) on all applicable cores */
	list_for_each_entry(core, &cluster->cores, elem) {
		ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
							   core->ti_sci_id);
		if (ret) {
			dev_err(core->dev, "module-reset assert failed, ret = %d\n",
				ret);
			goto unroll_module_reset;
		}
	}

	return 0;

unroll_module_reset:
	list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
		if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
							 core->ti_sci_id))
			dev_warn(core->dev, "module-reset assert back failed\n");
	}
	core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_local_reset:
	list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
		if (reset_control_deassert(core->reset))
			dev_warn(core->dev, "local-reset deassert back failed\n");
	}

	return ret;
}

static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
{
	struct k3_r5_core *core;
	int ret;

	/* enable the PSC modules (module reset) on all applicable cores */
	list_for_each_entry_reverse(core, &cluster->cores, elem) {
		ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
							   core->ti_sci_id);
		if (ret) {
			dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
				ret);
			core = list_next_entry(core, elem);
			goto unroll_module_reset;
		}
	}

	/* deassert local reset on all applicable cores */
	list_for_each_entry_reverse(core, &cluster->cores, elem) {
		ret = reset_control_deassert(core->reset);
		if (ret) {
			dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
				ret);
			goto unroll_local_reset;
		}
	}

	return 0;

unroll_local_reset:
	list_for_each_entry_continue(core, &cluster->cores, elem) {
		if (reset_control_assert(core->reset))
			dev_warn(core->dev, "local-reset assert back failed\n");
	}
	core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_module_reset:
	list_for_each_entry_from(core, &cluster->cores, elem) {
		if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
							 core->ti_sci_id))
			dev_warn(core->dev, "module-reset assert back failed\n");
	}

	return ret;
}

static inline int k3_r5_core_halt(struct k3_r5_core *core)
{
	return ti_sci_proc_set_control(core->tsp,
				       PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
}

static inline int k3_r5_core_run(struct k3_r5_core *core)
{
	return ti_sci_proc_set_control(core->tsp,
				       0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}
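
/*
 * The .prepare() op releases the applicable core(s) out of reset so that
 * firmware can be loaded into the TCMs, and zeroes out the ATCM and BTCM
 * unless the hardware auto-initializes them (the memset initializes the
 * TCMs, and with them their ECC state, before loading).
 */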
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct k3_r5_core *core = kproc->core;
	struct device *dev = kproc->dev;
	u32 ctrl = 0, cfg = 0, stat = 0;
	u64 boot_vec = 0;
	bool mem_init_dis;
	int ret;

	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
	if (ret < 0)
		return ret;
	mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);

	ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
		k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
	if (ret) {
		dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
			ret);
		return ret;
	}

	/*
	 * Newer IP revisions (e.g. on J7200 SoCs) support h/w auto-initialization
	 * of the TCMs, so there is no need for the s/w memzero below. The bit is
	 * configurable through System Firmware, so also account for it being
	 * disabled.
	 */
	if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) {
		dev_dbg(dev, "leveraging h/w init for TCM memories\n");
		return 0;
	}

	/*
	 * Zero out both TCMs so that ECC can be effective on all TCM addresses
	 * before the firmware image is loaded.
	 */
	dev_dbg(dev, "zeroing out ATCM memory\n");
	memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);

	dev_dbg(dev, "zeroing out BTCM memory\n");
	memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);

	return 0;
}
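
/*
 * The .unprepare() op is the complement of .prepare(): it puts the applicable
 * core(s) back into reset, acting on the whole cluster in LockStep mode and
 * only on this core in Split mode.
 */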
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct k3_r5_core *core = kproc->core;
	struct device *dev = kproc->dev;
	int ret;

	ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
		k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
	if (ret)
		dev_err(dev, "unable to disable cores, ret = %d\n", ret);

	return ret;
}
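
/*
 * Start the R5F core(s): request the mailbox channel, ping the remote
 * processor (the message simply waits in the mailbox FIFO until the core is
 * booted), program the boot vector, and move the core(s) from halt to run.
 * In LockStep mode all cluster cores are started, Core1 before Core0.
 */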
static int k3_r5_rproc_start(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct mbox_client *client = &kproc->client;
	struct device *dev = kproc->dev;
	struct k3_r5_core *core;
	u32 boot_addr;
	int ret;

	client->dev = dev;
	client->tx_done = NULL;
	client->rx_callback = k3_r5_rproc_mbox_callback;
	client->tx_block = false;
	client->knows_txdone = false;

	kproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(kproc->mbox)) {
		ret = -EBUSY;
		dev_err(dev, "mbox_request_channel failed: %ld\n",
			PTR_ERR(kproc->mbox));
		return ret;
	}

	/*
	 * Ping the remote processor, this is only for sanity-sake for now;
	 * there is no functional effect whatsoever.
	 *
	 * Note that the reply will _not_ arrive immediately: this message
	 * will wait in the mailbox fifo until the remote processor is booted.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed: %d\n", ret);
		goto put_mbox;
	}

	boot_addr = rproc->bootaddr;
	dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);

	/* boot vector need not be programmed for Core1 in LockStep mode */
	core = kproc->core;
	ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
	if (ret)
		goto put_mbox;

	/* unhalt/run all applicable cores */
	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		list_for_each_entry_reverse(core, &cluster->cores, elem) {
			ret = k3_r5_core_run(core);
			if (ret)
				goto unroll_core_run;
		}
	} else {
		ret = k3_r5_core_run(core);
		if (ret)
			goto put_mbox;
	}

	return 0;

unroll_core_run:
	list_for_each_entry_continue(core, &cluster->cores, elem) {
		if (k3_r5_core_halt(core))
			dev_warn(core->dev, "core halt back failed\n");
	}
put_mbox:
	mbox_free_channel(kproc->mbox);
	return ret;
}
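
/*
 * Stop the R5F core(s) by halting them through the TI-SCI processor control
 * interface. The cores are not put back into reset here; that happens later
 * in the .unprepare() op. In LockStep mode all cluster cores are halted
 * (Core0 first), and a failure unrolls any cores already halted. The mailbox
 * channel is released on success.
 */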
static int k3_r5_rproc_stop(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct k3_r5_core *core = kproc->core;
	int ret;

	/* halt all applicable cores */
	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		list_for_each_entry(core, &cluster->cores, elem) {
			ret = k3_r5_core_halt(core);
			if (ret) {
				core = list_prev_entry(core, elem);
				goto unroll_core_halt;
			}
		}
	} else {
		ret = k3_r5_core_halt(core);
		if (ret)
			goto out;
	}

	mbox_free_channel(kproc->mbox);

	return 0;

unroll_core_halt:
	list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
		if (k3_r5_core_run(core))
			dev_warn(core->dev, "core run back failed\n");
	}
out:
	return ret;
}
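
/*
 * Custom address translation ops: convert a remote processor device address
 * (or the SoC bus address for the TCMs) into a kernel virtual address. The
 * TCMs, any on-chip SRAM regions and the static DDR carveouts are searched
 * in that order.
 */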
static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_core *core = kproc->core;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	/* handle both R5 and SoC views of ATCM and BTCM */
	for (i = 0; i < core->num_mems; i++) {
		bus_addr = core->mem[i].bus_addr;
		dev_addr = core->mem[i].dev_addr;
		size = core->mem[i].size;

		/* handle R5-view addresses of TCMs */
		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		/* handle SoC-view addresses of TCMs */
		if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
			offset = da - bus_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* handle any on-chip SRAM regions */
	for (i = 0; i < core->num_sram; i++) {
		dev_addr = core->sram[i].dev_addr;
		size = core->sram[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = core->sram[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}

static const struct rproc_ops k3_r5_rproc_ops = {
	.prepare	= k3_r5_rproc_prepare,
	.unprepare	= k3_r5_rproc_unprepare,
	.start		= k3_r5_rproc_start,
	.stop		= k3_r5_rproc_stop,
	.kick		= k3_r5_rproc_kick,
	.da_to_va	= k3_r5_rproc_da_to_va,
};
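
/*
 * Initial configuration of an R5F core through the TI-SCI processor control
 * interface, done with the core(s) halted: fall back to Split mode if the
 * cluster does not permit LockStep operation, reset the boot vector to 0,
 * clear the TEINIT flag, and apply the DT-provided ATCM/BTCM enable and TCM
 * reset-base (loczrama) settings. In LockStep mode all cores are configured
 * first and the LockStep bit is set on Core0 last to effect the switchover.
 */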
static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
{
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct device *dev = kproc->dev;
	struct k3_r5_core *core0, *core, *temp;
	u32 ctrl = 0, cfg = 0, stat = 0;
	u32 set_cfg = 0, clr_cfg = 0;
	u64 boot_vec = 0;
	bool lockstep_en;
	int ret;

	core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
	core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ? core0 : kproc->core;

	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
				     &stat);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
		boot_vec, cfg, ctrl, stat);

	lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
	if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
		cluster->mode = CLUSTER_MODE_SPLIT;
	}

	/* always enable ARM mode and set boot vector to 0 */
	boot_vec = 0x0;
	if (core == core0) {
		clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
		/*
		 * the LockStep bit is only cleared on clusters that actually
		 * permit LockStep operation
		 */
		if (lockstep_en)
			clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
	}

	if (core->atcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;

	if (core->btcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;

	if (core->loczrama)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		/*
		 * halt and configure the individual cores with the required
		 * settings first, and then set the LockStep bit on Core0 alone
		 * to effect the mode switchover
		 */
		list_for_each_entry(temp, &cluster->cores, elem) {
			ret = k3_r5_core_halt(temp);
			if (ret)
				goto out;

			if (temp != core) {
				clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
				clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
			}
			ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
						     set_cfg, clr_cfg);
			if (ret)
				goto out;
		}

		set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
		clr_cfg = 0;
		ret = ti_sci_proc_set_config(core->tsp, boot_vec,
					     set_cfg, clr_cfg);
	} else {
		ret = k3_r5_core_halt(core);
		if (ret)
			goto out;

		ret = ti_sci_proc_set_config(core->tsp, boot_vec,
					     set_cfg, clr_cfg);
	}

out:
	return ret;
}

static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev_of_node(dev);
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems;
	int ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems <= 0) {
		dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
			num_rmems);
		return -EINVAL;
	}
	if (num_rmems < 2) {
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
			num_rmems);
		return -EINVAL;
	}

	/* use reserved memory region 0 for vring DMA allocations */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
			ret);
		return ret;
	}

	num_rmems--;
	kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem) {
		ret = -ENOMEM;
		goto release_rmem;
	}

	/* use the remaining reserved memory regions for static carveouts */
	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		if (!rmem_np) {
			ret = -EINVAL;
			goto unmap_rmem;
		}

		rmem = of_reserved_mem_lookup(rmem_np);
		if (!rmem) {
			of_node_put(rmem_np);
			ret = -EINVAL;
			goto unmap_rmem;
		}
		of_node_put(rmem_np);

		kproc->rmem[i].bus_addr = rmem->base;
		/*
		 * The R5F cores have no MMU, so their device addresses are
		 * restricted to 32-bit bus addresses and are identical to
		 * them; 64-bit reserved memory regions are not supported.
		 */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
			ret = -ENOMEM;
			goto unmap_rmem;
		}

		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			i + 1, &kproc->rmem[i].bus_addr,
			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
			kproc->rmem[i].dev_addr);
	}
	kproc->num_rmems = num_rmems;

	return 0;

unmap_rmem:
	for (i--; i >= 0; i--)
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);
release_rmem:
	of_reserved_mem_device_release(dev);
	return ret;
}

static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
{
	int i;

	for (i = 0; i < kproc->num_rmems; i++)
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);

	of_reserved_mem_device_release(kproc->dev);
}
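
/*
 * Adjust the reported TCM sizes on SoCs (e.g. J7200) where the device tree
 * describes the larger, combined TCM banks: in Split mode Core0 only owns
 * half of each 64 KB bank, so its ATCM and BTCM sizes are halved here.
 * Nothing needs to be done in LockStep mode or on SoCs without the larger
 * unified TCMs.
 */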
static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
{
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct k3_r5_core *core = kproc->core;
	struct device *cdev = core->dev;
	struct k3_r5_core *core0;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
	    !cluster->soc_data->tcm_is_double)
		return;

	core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
	if (core == core0) {
		WARN_ON(core->mem[0].size != SZ_64K);
		WARN_ON(core->mem[1].size != SZ_64K);

		core->mem[0].size /= 2;
		core->mem[1].size /= 2;

		dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
			core->mem[0].size, core->mem[1].size);
	}
}

static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
	struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	struct k3_r5_rproc *kproc;
	struct k3_r5_core *core, *core1;
	struct device *cdev;
	const char *fw_name;
	struct rproc *rproc;
	int ret;

	core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
	list_for_each_entry(core, &cluster->cores, elem) {
		cdev = core->dev;
		ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
		if (ret) {
			dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
				ret);
			goto out;
		}

		rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
				    fw_name, sizeof(*kproc));
		if (!rproc) {
			ret = -ENOMEM;
			goto out;
		}

		/* K3 R5s have a Region Address Translator (RAT) but no MMU */
		rproc->has_iommu = false;
		/* error recovery is not supported at present */
		rproc->recovery_disabled = true;

		kproc = rproc->priv;
		kproc->cluster = cluster;
		kproc->core = core;
		kproc->dev = cdev;
		kproc->rproc = rproc;
		core->rproc = rproc;

		ret = k3_r5_rproc_configure(kproc);
		if (ret) {
			dev_err(dev, "initial configure failed, ret = %d\n",
				ret);
			goto err_config;
		}

		k3_r5_adjust_tcm_sizes(kproc);

		ret = k3_r5_reserved_mem_init(kproc);
		if (ret) {
			dev_err(dev, "reserved memory init failed, ret = %d\n",
				ret);
			goto err_config;
		}

		ret = rproc_add(rproc);
		if (ret) {
			dev_err(dev, "rproc_add failed, ret = %d\n", ret);
			goto err_add;
		}

		/* create only one rproc in lockstep mode */
		if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
			break;
	}

	return 0;

err_split:
	rproc_del(rproc);
err_add:
	k3_r5_reserved_mem_exit(kproc);
err_config:
	rproc_free(rproc);
	core->rproc = NULL;
out:
	/* undo core0 upon any failures on core1 in split-mode */
	if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
		core = list_prev_entry(core, elem);
		rproc = core->rproc;
		kproc = rproc->priv;
		goto err_split;
	}
	return ret;
}

static void k3_r5_cluster_rproc_exit(void *data)
{
	struct k3_r5_cluster *cluster = platform_get_drvdata(data);
	struct k3_r5_rproc *kproc;
	struct k3_r5_core *core;
	struct rproc *rproc;

	/*
	 * lockstep mode has only one rproc associated with the first core,
	 * whereas split-mode has two rprocs associated with each core, and
	 * requires that core1 be powered down first
	 */
	core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
		list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
		list_last_entry(&cluster->cores, struct k3_r5_core, elem);

	list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
		rproc = core->rproc;
		kproc = rproc->priv;

		rproc_del(rproc);

		k3_r5_reserved_mem_exit(kproc);

		rproc_free(rproc);
		core->rproc = NULL;
	}
}

static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
					       struct k3_r5_core *core)
{
	static const char * const mem_names[] = {"atcm", "btcm"};
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems;
	int i;

	num_mems = ARRAY_SIZE(mem_names);
	core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
	if (!core->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   mem_names[i]);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				mem_names[i]);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				mem_names[i]);
			return -EBUSY;
		}

		/*
		 * TCMs are designed in general to support RAM-like backing
		 * memories, so map them as Normal Non-Cached memories. This
		 * also avoids any potential alignment faults that a Device
		 * type mapping could trigger during firmware loading.
		 */
		core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							resource_size(res));
		if (!core->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n", mem_names[i]);
			return -ENOMEM;
		}
		core->mem[i].bus_addr = res->start;

		/*
		 * The R5F core sees either the ATCM or the BTCM at device
		 * address 0x0 depending on the loczrama setting; the other
		 * TCM is at K3_R5_TCM_DEV_ADDR.
		 */
		if (!strcmp(mem_names[i], "atcm")) {
			core->mem[i].dev_addr = core->loczrama ?
							0 : K3_R5_TCM_DEV_ADDR;
		} else {
			core->mem[i].dev_addr = core->loczrama ?
							K3_R5_TCM_DEV_ADDR : 0;
		}
		core->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			mem_names[i], &core->mem[i].bus_addr,
			core->mem[i].size, core->mem[i].cpu_addr,
			core->mem[i].dev_addr);
	}
	core->num_mems = num_mems;

	return 0;
}

static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
					    struct k3_r5_core *core)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *sram_np;
	struct resource res;
	int num_sram;
	int i, ret;

	num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
	if (num_sram <= 0) {
		dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
			num_sram);
		return 0;
	}

	core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
	if (!core->sram)
		return -ENOMEM;

	for (i = 0; i < num_sram; i++) {
		sram_np = of_parse_phandle(np, "sram", i);
		if (!sram_np)
			return -EINVAL;

		if (!of_device_is_available(sram_np)) {
			of_node_put(sram_np);
			return -EINVAL;
		}

		ret = of_address_to_resource(sram_np, 0, &res);
		of_node_put(sram_np);
		if (ret)
			return -EINVAL;

		core->sram[i].bus_addr = res.start;
		core->sram[i].dev_addr = res.start;
		core->sram[i].size = resource_size(&res);
		core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
							 resource_size(&res));
		if (!core->sram[i].cpu_addr) {
			dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
				i, &res.start);
			return -ENOMEM;
		}

		dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			i, &core->sram[i].bus_addr,
			core->sram[i].size, core->sram[i].cpu_addr,
			core->sram[i].dev_addr);
	}
	core->num_sram = num_sram;

	return 0;
}

static
struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
					  const struct ti_sci_handle *sci)
{
	struct ti_sci_proc *tsp;
	u32 temp[2];
	int ret;

	ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
					 temp, 2);
	if (ret < 0)
		return ERR_PTR(ret);

	tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
	if (!tsp)
		return ERR_PTR(-ENOMEM);

	tsp->dev = dev;
	tsp->sci = sci;
	tsp->ops = &sci->ops.proc_ops;
	tsp->proc_id = temp[0];
	tsp->host_id = temp[1];

	return tsp;
}

static int k3_r5_core_of_init(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct k3_r5_core *core;
	int ret;

	if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
		return -ENOMEM;

	core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto err;
	}

	core->dev = dev;
	/*
	 * Use SoC Power-on-Reset values as defaults if no DT properties are
	 * used to dictate the TCM configurations
	 */
	core->atcm_enable = 0;
	core->btcm_enable = 1;
	core->loczrama = 1;

	ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
			ret);
		goto err;
	}

	ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
			ret);
		goto err;
	}

	ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
		goto err;
	}

	core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(core->ti_sci)) {
		ret = PTR_ERR(core->ti_sci);
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
				ret);
		}
		core->ti_sci = NULL;
		goto err;
	}

	ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
	if (ret) {
		dev_err(dev, "missing 'ti,sci-dev-id' property\n");
		goto err;
	}

	core->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR_OR_NULL(core->reset)) {
		ret = PTR_ERR_OR_ZERO(core->reset);
		if (!ret)
			ret = -ENODEV;
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "failed to get reset handle, ret = %d\n",
				ret);
		}
		goto err;
	}

	core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
	if (IS_ERR(core->tsp)) {
		ret = PTR_ERR(core->tsp);
		dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
			ret);
		goto err;
	}

	ret = k3_r5_core_of_get_internal_memories(pdev, core);
	if (ret) {
		dev_err(dev, "failed to get internal memories, ret = %d\n",
			ret);
		goto err;
	}

	ret = k3_r5_core_of_get_sram_memories(pdev, core);
	if (ret) {
		dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
		goto err;
	}

	ret = ti_sci_proc_request(core->tsp);
	if (ret < 0) {
		dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
		goto err;
	}

	platform_set_drvdata(pdev, core);
	devres_close_group(dev, k3_r5_core_of_init);

	return 0;

err:
	devres_release_group(dev, k3_r5_core_of_init);
	return ret;
}

/*
 * Explicitly release the devres group since the child R5F core devices are
 * not bound to a driver of their own
 */
static void k3_r5_core_of_exit(struct platform_device *pdev)
{
	struct k3_r5_core *core = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int ret;

	ret = ti_sci_proc_release(core->tsp);
	if (ret)
		dev_err(dev, "failed to release proc, ret = %d\n", ret);

	platform_set_drvdata(pdev, NULL);
	devres_release_group(dev, k3_r5_core_of_init);
}

static void k3_r5_cluster_of_exit(void *data)
{
	struct k3_r5_cluster *cluster = platform_get_drvdata(data);
	struct platform_device *cpdev;
	struct k3_r5_core *core, *temp;

	list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
		list_del(&core->elem);
		cpdev = to_platform_device(core->dev);
		k3_r5_core_of_exit(cpdev);
	}
}

static int k3_r5_cluster_of_init(struct platform_device *pdev)
{
	struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct platform_device *cpdev;
	struct device_node *child;
	struct k3_r5_core *core;
	int ret;

	for_each_available_child_of_node(np, child) {
		cpdev = of_find_device_by_node(child);
		if (!cpdev) {
			ret = -ENODEV;
			dev_err(dev, "could not get R5 core platform device\n");
			goto fail;
		}

		ret = k3_r5_core_of_init(cpdev);
		if (ret) {
			dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
				ret);
			put_device(&cpdev->dev);
			goto fail;
		}

		core = platform_get_drvdata(cpdev);
		put_device(&cpdev->dev);
		list_add_tail(&core->elem, &cluster->cores);
	}

	return 0;

fail:
	k3_r5_cluster_of_exit(pdev);
	return ret;
}

static int k3_r5_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct k3_r5_cluster *cluster;
	const struct k3_r5_soc_data *data;
	int num_cores;
	int ret;

	data = of_device_get_match_data(&pdev->dev);
	if (!data) {
		dev_err(dev, "SoC-specific data is not defined\n");
		return -ENODEV;
	}

	cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
	if (!cluster)
		return -ENOMEM;

	cluster->dev = dev;
	cluster->mode = CLUSTER_MODE_LOCKSTEP;
	cluster->soc_data = data;
	INIT_LIST_HEAD(&cluster->cores);

	ret = of_property_read_u32(np, "ti,cluster-mode", (u32 *)&cluster->mode);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
			ret);
		return ret;
	}

	num_cores = of_get_available_child_count(np);
	if (num_cores != 2) {
		dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
			num_cores);
		return -ENODEV;
	}

	platform_set_drvdata(pdev, cluster);

	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = k3_r5_cluster_of_init(pdev);
	if (ret) {
		dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
		return ret;
	}

	ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev);
	if (ret)
		return ret;

	ret = k3_r5_cluster_rproc_init(pdev);
	if (ret) {
		dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev);
	if (ret)
		return ret;

	return 0;
}

static const struct k3_r5_soc_data am65_j721e_soc_data = {
	.tcm_is_double = false,
	.tcm_ecc_autoinit = false,
};

static const struct k3_r5_soc_data j7200_soc_data = {
	.tcm_is_double = true,
	.tcm_ecc_autoinit = true,
};

static const struct of_device_id k3_r5_of_match[] = {
	{ .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
	{ .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
	{ .compatible = "ti,j7200-r5fss", .data = &j7200_soc_data, },
	{ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);

static struct platform_driver k3_r5_rproc_driver = {
	.probe = k3_r5_probe,
	.driver = {
		.name = "k3_r5_rproc",
		.of_match_table = k3_r5_of_match,
	},
};

module_platform_driver(k3_r5_rproc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");