1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#define pr_fmt(fmt) "%s: " fmt, __func__
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/device.h>
30#include <linux/slab.h>
31#include <linux/mutex.h>
32#include <linux/dma-mapping.h>
33#include <linux/firmware.h>
34#include <linux/string.h>
35#include <linux/debugfs.h>
36#include <linux/remoteproc.h>
37#include <linux/iommu.h>
38#include <linux/idr.h>
39#include <linux/elf.h>
40#include <linux/crc32.h>
41#include <linux/virtio_ids.h>
42#include <linux/virtio_ring.h>
43#include <asm/byteorder.h>
44
45#include "remoteproc_internal.h"
46
/* handler that walks a whole resource table */
typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
				struct resource_table *table, int len);
/* handler for one resource entry of a specific type */
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
				 void *, int offset, int avail);

/* unique indices for registered rproc devices (used in "remoteproc%d") */
static DEFINE_IDA(rproc_dev_index);

/* human-readable names for enum rproc_crash_type, indexed by type */
static const char * const rproc_crash_names[] = {
	[RPROC_MMUFAULT]	= "mmufault",
};
58
59
60static const char *rproc_crash_to_string(enum rproc_crash_type type)
61{
62 if (type < ARRAY_SIZE(rproc_crash_names))
63 return rproc_crash_names[type];
64 return "unknown";
65}
66
67
68
69
70
71
72
73
74
/*
 * IOMMU fault handler: report the crash and kick off recovery.
 *
 * This is scheduled from IRQ/atomic context by the IOMMU core, so the
 * actual recovery is deferred to a workqueue via rproc_report_crash().
 */
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *token)
{
	struct rproc *rproc = token;

	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);

	rproc_report_crash(rproc, RPROC_MMUFAULT);

	/*
	 * Let the iommu core know we're not really handling this fault;
	 * we just used it as a recovery trigger.
	 */
	return -ENOSYS;
}
90
/*
 * Set up an IOMMU domain for the remote processor, when one is present.
 *
 * Allocates a domain on the parent device's bus, installs the fault
 * handler, and attaches the parent device. Platforms whose bus has no
 * IOMMU are a successful no-op. On success the domain is stored in
 * rproc->domain for later rproc_disable_iommu().
 *
 * Returns 0 on success (including the no-IOMMU case), negative errno
 * otherwise.
 */
static int rproc_enable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain;
	struct device *dev = rproc->dev.parent;
	int ret;

	/*
	 * NOTE(review): iommu_present() checks the bus, not this specific
	 * device — presumably sufficient for the platforms this supports;
	 * confirm when porting to buses with per-device IOMMUs.
	 */
	if (!iommu_present(dev->bus)) {
		dev_dbg(dev, "iommu not found\n");
		return 0;
	}

	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		dev_err(dev, "can't alloc iommu domain\n");
		return -ENOMEM;
	}

	/* install fault handler before attaching, so no fault is missed */
	iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		dev_err(dev, "can't attach iommu device: %d\n", ret);
		goto free_domain;
	}

	rproc->domain = domain;

	return 0;

free_domain:
	iommu_domain_free(domain);
	return ret;
}
135
136static void rproc_disable_iommu(struct rproc *rproc)
137{
138 struct iommu_domain *domain = rproc->domain;
139 struct device *dev = rproc->dev.parent;
140
141 if (!domain)
142 return;
143
144 iommu_detach_device(domain, dev);
145 iommu_domain_free(domain);
146
147 return;
148}
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
168{
169 struct rproc_mem_entry *carveout;
170 void *ptr = NULL;
171
172 list_for_each_entry(carveout, &rproc->carveouts, node) {
173 int offset = da - carveout->da;
174
175
176 if (offset < 0)
177 continue;
178
179
180 if (offset + len > carveout->len)
181 continue;
182
183 ptr = carveout->va + offset;
184
185 break;
186 }
187
188 return ptr;
189}
190EXPORT_SYMBOL(rproc_da_to_va);
191
192int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
193{
194 struct rproc *rproc = rvdev->rproc;
195 struct device *dev = &rproc->dev;
196 struct rproc_vring *rvring = &rvdev->vring[i];
197 struct fw_rsc_vdev *rsc;
198 dma_addr_t dma;
199 void *va;
200 int ret, size, notifyid;
201
202
203 size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
204
205
206
207
208
209 va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
210 if (!va) {
211 dev_err(dev->parent, "dma_alloc_coherent failed\n");
212 return -EINVAL;
213 }
214
215
216
217
218
219
220 ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
221 if (ret < 0) {
222 dev_err(dev, "idr_alloc failed: %d\n", ret);
223 dma_free_coherent(dev->parent, size, va, dma);
224 return ret;
225 }
226 notifyid = ret;
227
228 dev_dbg(dev, "vring%d: va %p dma %llx size %x idr %d\n", i, va,
229 (unsigned long long)dma, size, notifyid);
230
231 rvring->va = va;
232 rvring->dma = dma;
233 rvring->notifyid = notifyid;
234
235
236
237
238
239
240
241 rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
242 rsc->vring[i].da = dma;
243 rsc->vring[i].notifyid = notifyid;
244 return 0;
245}
246
/*
 * Validate vring @i of a vdev resource entry and cache its parameters
 * (queue size, alignment) in the corresponding rproc_vring.
 *
 * The DMA memory itself is allocated later, in rproc_alloc_vring().
 * Returns 0 on success, -EINVAL on a malformed entry.
 */
static int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	struct rproc_vring *rvring = &rvdev->vring[i];

	dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
					i, vring->da, vring->num, vring->align);

	/* make sure reserved bytes are zeroes */
	if (vring->reserved) {
		dev_err(dev, "vring rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	/* verify queue size and vring alignment are sane */
	if (!vring->num || !vring->align) {
		dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
						vring->num, vring->align);
		return -EINVAL;
	}

	rvring->len = vring->num;
	rvring->align = vring->align;
	rvring->rvdev = rvdev;

	return 0;
}
277
278void rproc_free_vring(struct rproc_vring *rvring)
279{
280 int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
281 struct rproc *rproc = rvring->rvdev->rproc;
282 int idx = rvring->rvdev->vring - rvring;
283 struct fw_rsc_vdev *rsc;
284
285 dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
286 idr_remove(&rproc->notifyids, rvring->notifyid);
287
288
289 rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
290 rsc->vring[idx].da = 0;
291 rsc->vring[idx].notifyid = -1;
292}
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
/*
 * Handle a vdev resource entry: validate it, allocate and initialize a
 * struct rproc_vdev, parse its vrings, and register the resulting virtio
 * device with the virtio framework.
 *
 * @offset is the entry's offset within the resource table (cached so the
 * vring allocation code can patch the table later); @avail is the number
 * of bytes left in the table after the entry's header.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
							int offset, int avail)
{
	struct device *dev = &rproc->dev;
	struct rproc_vdev *rvdev;
	int i, ret;

	/*
	 * make sure resource isn't truncated
	 * NOTE(review): assumes num_of_vrings/config_len are small enough
	 * that this sum doesn't overflow int — firmware is trusted here;
	 * confirm if untrusted images are ever loaded.
	 */
	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
			+ rsc->config_len > avail) {
		dev_err(dev, "vdev rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		dev_err(dev, "vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
		rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/* we currently support only two vrings per rvdev */
	if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
		dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	rvdev->rproc = rproc;

	/* parse the vrings; actual vring allocation happens at boot */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			goto free_rvdev;
	}

	/* remember the resource offset so the table can be patched later */
	rvdev->rsc_offset = offset;

	list_add_tail(&rvdev->node, &rproc->rvdevs);

	/* it is now safe to add the virtio device */
	ret = rproc_add_virtio_dev(rvdev, rsc->id);
	if (ret)
		goto remove_rvdev;

	return 0;

remove_rvdev:
	list_del(&rvdev->node);
free_rvdev:
	kfree(rvdev);
	return ret;
}
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
/*
 * Handle a trace resource entry: expose the remote processor's trace
 * buffer through a debugfs file so users can read its logs.
 *
 * The buffer must live inside an already-registered carveout (we resolve
 * its device address via rproc_da_to_va()), so RSC_CARVEOUT entries must
 * precede RSC_TRACE entries in the resource table.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
							int offset, int avail)
{
	struct rproc_mem_entry *trace;
	struct device *dev = &rproc->dev;
	void *ptr;
	char name[15];

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "trace rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	/* what's the kernel address of this resource ? */
	ptr = rproc_da_to_va(rproc, rsc->da, rsc->len);
	if (!ptr) {
		dev_err(dev, "erroneous trace resource entry\n");
		return -EINVAL;
	}

	trace = kzalloc(sizeof(*trace), GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "kzalloc trace failed\n");
		return -ENOMEM;
	}

	/* set the trace buffer dma properties */
	trace->len = rsc->len;
	trace->va = ptr;

	/* make sure snprintf always null terminates, even on truncation */
	snprintf(name, sizeof(name), "trace%d", rproc->num_traces);

	/* create the debugfs entry */
	trace->priv = rproc_create_trace_file(name, rproc, trace);
	if (!trace->priv) {
		/* clearing va is defensive; the entry was never published */
		trace->va = NULL;
		kfree(trace);
		return -EINVAL;
	}

	list_add_tail(&trace->node, &rproc->traces);

	rproc->num_traces++;

	dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr,
						rsc->da, rsc->len);

	return 0;
}
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
/*
 * Handle a devmem resource entry: program the remote processor's IOMMU
 * so that physical region [pa, pa+len) is visible at device address da.
 *
 * Only meaningful when this rproc is behind an IOMMU we control; the
 * mapping is recorded in rproc->mappings so rproc_resource_cleanup()
 * can unmap it at shutdown.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
							int offset, int avail)
{
	struct rproc_mem_entry *mapping;
	struct device *dev = &rproc->dev;
	int ret;

	/* no point in handling this resource without a valid iommu domain */
	if (!rproc->domain)
		return -EINVAL;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "devmem rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		dev_err(dev, "kzalloc mapping failed\n");
		return -ENOMEM;
	}

	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
	if (ret) {
		dev_err(dev, "failed to map devmem: %d\n", ret);
		goto out;
	}

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown). We can't trust the remote processor not to
	 * change the resource table, so we must maintain this info
	 * independently.
	 */
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	list_add_tail(&mapping->node, &rproc->mappings);

	dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
					rsc->pa, rsc->da, rsc->len);

	return 0;

out:
	kfree(mapping);
	return ret;
}
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
/*
 * Handle a carveout resource entry: allocate physically-contiguous
 * ("carved out") memory for the remote processor, optionally map it via
 * the IOMMU at the requested device address, and record it in
 * rproc->carveouts for later da-to-va translation and cleanup.
 *
 * The allocated physical address is written back into the (cached)
 * resource table entry so the remote processor can learn where its
 * memory actually landed.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int rproc_handle_carveout(struct rproc *rproc,
						struct fw_rsc_carveout *rsc,
						int offset, int avail)

{
	struct rproc_mem_entry *carveout, *mapping;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "carveout rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n",
			rsc->da, rsc->pa, rsc->len, rsc->flags);

	carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
	if (!carveout) {
		dev_err(dev, "kzalloc carveout failed\n");
		return -ENOMEM;
	}

	va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent, "dma_alloc_coherent err: %d\n", rsc->len);
		ret = -ENOMEM;
		goto free_carv;
	}

	dev_dbg(dev, "carveout va %p, dma %llx, len 0x%x\n", va,
					(unsigned long long)dma, rsc->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API:
	 * the remote processor may require the allocation to show up at a
	 * specific device address (da), because e.g. its firmware was
	 * linked against that address. In that case we manually program
	 * the IOMMU mapping ourselves, and remember it so we can unmap
	 * on shutdown.
	 */
	if (rproc->domain) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			dev_err(dev, "kzalloc mapping failed\n");
			ret = -ENOMEM;
			goto dma_free;
		}

		ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
								rsc->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto free_mapping;
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown); we can't trust the remote
		 * processor not to change the resource table, so keep our
		 * own record.
		 */
		mapping->da = rsc->da;
		mapping->len = rsc->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to 0x%llx\n",
					rsc->da, (unsigned long long)dma);
	}

	/*
	 * Some remote processors need to know the allocated physical
	 * address; publish it via the resource table.
	 * NOTE(review): without an IOMMU, "dma" here is presumably the
	 * physical address — confirm per platform DMA setup.
	 */
	rsc->pa = dma;

	carveout->va = va;
	carveout->len = rsc->len;
	carveout->dma = dma;
	carveout->da = rsc->da;

	list_add_tail(&carveout->node, &rproc->carveouts);

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, rsc->len, va, dma);
free_carv:
	kfree(carveout);
	return ret;
}
677
/*
 * Pre-pass RSC_VDEV handler: only tallies the number of vrings so
 * rproc->max_notifyid can be sized before vdevs are actually created.
 */
static int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc,
			      int offset, int avail)
{
	/* Summarize the number of notification IDs */
	rproc->max_notifyid += rsc->num_of_vrings;

	return 0;
}
686
687
688
689
690
/*
 * Handlers used at boot time to set up the firmware's resources.
 * RSC_VDEV is NULL here because vdevs are registered earlier, when the
 * firmware is first probed (see rproc_vdev_handler below).
 */
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
	[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
	[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
	[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
	[RSC_VDEV] = NULL,
};

/* handlers used at firmware-probe time to register the virtio devices */
static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = {
	[RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
};

/* handlers used in a pre-pass to count vrings (sizes the notifyid space) */
static rproc_handle_resource_t rproc_count_vrings_handler[RSC_LAST] = {
	[RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings,
};
705
706
/*
 * Walk the cached resource table and dispatch each entry to the matching
 * handler from @handlers. Unknown resource types are warned about and
 * skipped; NULL handler slots are silently skipped. Stops at the first
 * handler failure.
 *
 * @len is the table size in bytes, used to reject truncated entries.
 */
static int rproc_handle_resources(struct rproc *rproc, int len,
				  rproc_handle_resource_t handlers[RSC_LAST])
{
	struct device *dev = &rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0, i;

	for (i = 0; i < rproc->table_ptr->num; i++) {
		int offset = rproc->table_ptr->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
		/* bytes remaining after this entry's header */
		int avail = len - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "rsc: type %d\n", hdr->type);

		if (hdr->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}
744
745
746
747
748
749
750
751
/*
 * Release all resources acquired while handling the resource table:
 * debugfs trace files, carveout allocations, and IOMMU mappings.
 *
 * Called both on a failed boot and on shutdown. Safe when some lists
 * are empty (e.g. mappings is only populated when rproc->domain exists,
 * so iommu_unmap() is never reached without a domain).
 */
static void rproc_resource_cleanup(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct device *dev = &rproc->dev;

	/* clean up debugfs trace entries */
	list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
		rproc_remove_trace_file(entry->priv);
		rproc->num_traces--;
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		dma_free_coherent(dev->parent, entry->len, entry->va, entry->dma);
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up iommu mapping entries */
	list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
		size_t unmapped;

		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
		if (unmapped != entry->len) {
			/* nothing much to do besides complaining */
			dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
								unmapped);
		}

		list_del(&entry->node);
		kfree(entry);
	}
}
787
788
789
790
791static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
792{
793 struct device *dev = &rproc->dev;
794 const char *name = rproc->firmware;
795 struct resource_table *table, *loaded_table;
796 int ret, tablesz;
797
798 if (!rproc->table_ptr)
799 return -ENOMEM;
800
801 ret = rproc_fw_sanity_check(rproc, fw);
802 if (ret)
803 return ret;
804
805 dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
806
807
808
809
810
811 ret = rproc_enable_iommu(rproc);
812 if (ret) {
813 dev_err(dev, "can't enable iommu: %d\n", ret);
814 return ret;
815 }
816
817 rproc->bootaddr = rproc_get_boot_addr(rproc, fw);
818
819
820 table = rproc_find_rsc_table(rproc, fw, &tablesz);
821 if (!table) {
822 ret = -EINVAL;
823 goto clean_up;
824 }
825
826
827 if (rproc->table_csum != crc32(0, table, tablesz)) {
828 dev_err(dev, "resource checksum failed, fw changed?\n");
829 ret = -EINVAL;
830 goto clean_up;
831 }
832
833
834 ret = rproc_handle_resources(rproc, tablesz, rproc_loading_handlers);
835 if (ret) {
836 dev_err(dev, "Failed to process resources: %d\n", ret);
837 goto clean_up;
838 }
839
840
841 ret = rproc_load_segments(rproc, fw);
842 if (ret) {
843 dev_err(dev, "Failed to load program segments: %d\n", ret);
844 goto clean_up;
845 }
846
847
848
849
850
851
852
853
854 loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
855 if (!loaded_table)
856 goto clean_up;
857
858 memcpy(loaded_table, rproc->cached_table, tablesz);
859
860
861 ret = rproc->ops->start(rproc);
862 if (ret) {
863 dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
864 goto clean_up;
865 }
866
867
868
869
870
871
872 rproc->table_ptr = loaded_table;
873
874 rproc->state = RPROC_RUNNING;
875
876 dev_info(dev, "remote processor %s is now up\n", rproc->name);
877
878 return 0;
879
880clean_up:
881 rproc_resource_cleanup(rproc);
882 rproc_disable_iommu(rproc);
883 return ret;
884}
885
886
887
888
889
890
891
892
893
/*
 * Asynchronous firmware callback (from request_firmware_nowait()):
 * probe the firmware's resource table, cache a private copy of it,
 * count the vrings and register the supported virtio devices.
 *
 * Always releases the firmware and completes firmware_loading_complete,
 * even on error, so rproc_del() never blocks forever.
 */
static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;
	struct resource_table *table;
	int ret, tablesz;

	if (rproc_fw_sanity_check(rproc, fw) < 0)
		goto out;

	/* look for the resource table */
	table = rproc_find_rsc_table(rproc, fw, &tablesz);
	if (!table)
		goto out;

	/* remember the checksum so a changed fw is caught at boot time */
	rproc->table_csum = crc32(0, table, tablesz);

	/*
	 * Create a private copy of the resource table: the firmware image
	 * can be released now, and the boot-time code patches this cached
	 * copy (vring addresses etc.) before copying it to the device.
	 * Freed in rproc_del()/rproc_trigger_recovery().
	 */
	rproc->cached_table = kmalloc(tablesz, GFP_KERNEL);
	if (!rproc->cached_table)
		goto out;

	memcpy(rproc->cached_table, table, tablesz);
	rproc->table_ptr = rproc->cached_table;

	/* count the number of notify-ids */
	rproc->max_notifyid = -1;
	ret = rproc_handle_resources(rproc, tablesz, rproc_count_vrings_handler);
	if (ret)
		goto out;

	/* look for virtio devices and register them */
	ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);

out:
	release_firmware(fw);
	/* allow rproc_del() contexts, if any, to proceed */
	complete_all(&rproc->firmware_loading_complete);
}
937
/*
 * Kick off asynchronous firmware loading so the resource table can be
 * probed and the rproc's virtio devices registered without blocking the
 * caller. The heavy lifting happens in rproc_fw_config_virtio().
 *
 * Returns the request_firmware_nowait() result (0 on successful
 * scheduling; the actual probe may still fail later).
 */
static int rproc_add_virtio_devices(struct rproc *rproc)
{
	int ret;

	/* rproc_del() calls must wait until async loader completes */
	init_completion(&rproc->firmware_loading_complete);

	/*
	 * We must retrieve early virtio configuration info from
	 * the firmware (e.g. whether to register a virtio device,
	 * what virtio features does it support, ...).
	 *
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					rproc->firmware, &rproc->dev, GFP_KERNEL,
					rproc, rproc_fw_config_virtio);
	if (ret < 0) {
		dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);
		/* the callback won't run, so unblock waiters ourselves */
		complete_all(&rproc->firmware_loading_complete);
	}

	return ret;
}
963
964
965
966
967
968
969
970
971
972
973
/*
 * Recover a crashed remote processor: tear down its virtio devices
 * (which eventually shuts the processor down), wait for the shutdown
 * to complete, drop the stale cached resource table, and re-run the
 * firmware probe to bring everything back up.
 */
int rproc_trigger_recovery(struct rproc *rproc)
{
	struct rproc_vdev *rvdev, *rvtmp;

	dev_err(&rproc->dev, "recovering %s\n", rproc->name);

	init_completion(&rproc->crash_comp);

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
		rproc_remove_virtio_dev(rvdev);

	/* wait until there is no more rproc users (rproc_shutdown completes this) */
	wait_for_completion(&rproc->crash_comp);

	/* Free the copy of the resource table; re-created by the fw probe */
	kfree(rproc->cached_table);

	return rproc_add_virtio_devices(rproc);
}
994
995
996
997
998
999
1000
/*
 * Workqueue handler scheduled by rproc_report_crash(): mark the rproc
 * crashed and, unless recovery is disabled, trigger recovery.
 *
 * Runs in process context so it may sleep; the state check under the
 * lock makes repeated crash reports while already crashed/offline a
 * no-op.
 */
static void rproc_crash_handler_work(struct work_struct *work)
{
	struct rproc *rproc = container_of(work, struct rproc, crash_handler);
	struct device *dev = &rproc->dev;

	dev_dbg(dev, "enter %s\n", __func__);

	mutex_lock(&rproc->lock);

	if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) {
		/* handle only the first crash detected */
		mutex_unlock(&rproc->lock);
		return;
	}

	rproc->state = RPROC_CRASHED;
	dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
		rproc->name);

	mutex_unlock(&rproc->lock);

	if (!rproc->recovery_disabled)
		rproc_trigger_recovery(rproc);
}
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 * Reference-counted: only the first call actually powers the processor
 * up; subsequent calls just increment the power refcount. Each
 * successful call also pins the platform driver's module.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = &rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* loading a firmware is required */
	if (!rproc->firmware) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->parent->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* skip the boot process if rproc is already powered up */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware (synchronously this time; may sleep) */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	release_firmware(firmware_p);

downref_rproc:
	if (ret) {
		/* undo the module pin and power refcount taken above */
		module_put(dev->parent->driver->owner);
		atomic_dec(&rproc->power);
	}
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);
1098EXPORT_SYMBOL(rproc_boot);
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * The inverse of rproc_boot(): decrements the power refcount and, when
 * it reaches zero, stops the processor, releases its resources, and
 * disables the IOMMU. Every call (except on stop failure) releases one
 * module reference taken by rproc_boot().
 *
 * If the rproc was crashed, completes crash_comp so a pending
 * rproc_trigger_recovery() can proceed.
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		/* keep the refcount consistent with the running state */
		atomic_inc(&rproc->power);
		dev_err(dev, "can't stop rproc: %d\n", ret);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	rproc_disable_iommu(rproc);

	/* the loaded table is gone; fall back to the cached copy */
	rproc->table_ptr = rproc->cached_table;

	/* if in crash state, unlock crash handler */
	if (rproc->state == RPROC_CRASHED)
		complete_all(&rproc->crash_comp);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

out:
	mutex_unlock(&rproc->lock);
	/* ret == 0 on every path except a failed ops->stop() */
	if (!ret)
		module_put(dev->parent->driver->owner);
}
EXPORT_SYMBOL(rproc_shutdown);
1163EXPORT_SYMBOL(rproc_shutdown);
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
/**
 * rproc_add() - register a remote processor with the framework
 * @rproc: rproc handle previously allocated with rproc_alloc()
 *
 * Adds the underlying device, creates the debugfs entries, and starts
 * the asynchronous firmware probe that registers the rproc's virtio
 * devices.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int rproc_add(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = device_add(dev);
	if (ret < 0)
		return ret;

	dev_info(dev, "%s is available\n", rproc->name);

	dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
	dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");

	/* create debugfs entries */
	rproc_create_debug_dir(rproc);

	return rproc_add_virtio_devices(rproc);
}
EXPORT_SYMBOL(rproc_add);
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
/*
 * Device release callback, invoked when the last reference to the
 * rproc's device is dropped (see rproc_put()): tears down debugfs,
 * the notifyid idr, the device index, and finally the rproc itself.
 */
static void rproc_type_release(struct device *dev)
{
	struct rproc *rproc = container_of(dev, struct rproc, dev);

	dev_info(&rproc->dev, "releasing %s\n", rproc->name);

	rproc_delete_debug_dir(rproc);

	idr_destroy(&rproc->notifyids);

	/* index may be negative if ida allocation never happened/failed */
	if (rproc->index >= 0)
		ida_simple_remove(&rproc_dev_index, rproc->index);

	kfree(rproc);
}

static struct device_type rproc_type = {
	.name		= "remoteproc",
	.release	= rproc_type_release,
};
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
/**
 * rproc_alloc() - allocate and initialize a new remote processor handle
 * @dev: the underlying (platform) device
 * @name: human-readable name of this remote processor
 * @ops: platform-specific start/stop handlers
 * @firmware: firmware name, or NULL to derive a default from @name
 * @len: size of private data the caller needs (available via rproc->priv)
 *
 * Allocates a struct rproc with @len bytes of private data appended
 * (and room for a generated firmware name when @firmware is NULL),
 * initializes its device, lists, locks and crash machinery, and assigns
 * a unique index. Release with rproc_put().
 *
 * Returns the new rproc on success, NULL on failure.
 */
struct rproc *rproc_alloc(struct device *dev, const char *name,
				const struct rproc_ops *ops,
				const char *firmware, int len)
{
	struct rproc *rproc;
	char *p, *template = "rproc-%s-fw";
	int name_len = 0;

	if (!dev || !name || !ops)
		return NULL;

	if (!firmware)
		/*
		 * No firmware name was given: we'll generate a default
		 * ("rproc-<name>-fw") and store it right after the priv
		 * area. Reserve space for it now: strlen(name) plus the
		 * template minus the two '%s' chars, plus the '\0'.
		 */
		name_len = strlen(name) + strlen(template) - 2 + 1;

	/* single allocation: struct + priv data + (optional) fw name */
	rproc = kzalloc(sizeof(struct rproc) + len + name_len, GFP_KERNEL);
	if (!rproc) {
		dev_err(dev, "%s: kzalloc failed\n", __func__);
		return NULL;
	}

	if (!firmware) {
		/* the generated name lives just past the priv area */
		p = (char *)rproc + sizeof(struct rproc) + len;
		snprintf(p, name_len, template, name);
	} else {
		p = (char *)firmware;
	}

	rproc->firmware = p;
	rproc->name = name;
	rproc->ops = ops;
	rproc->priv = &rproc[1];

	device_initialize(&rproc->dev);
	rproc->dev.parent = dev;
	rproc->dev.type = &rproc_type;

	/* Assign a unique device index and name */
	rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
	if (rproc->index < 0) {
		dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
		/* put_device triggers rproc_type_release, which frees rproc */
		put_device(&rproc->dev);
		return NULL;
	}

	dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);

	atomic_set(&rproc->power, 0);

	/* Set ELF as the default fw_ops handler */
	rproc->fw_ops = &rproc_elf_fw_ops;

	mutex_init(&rproc->lock);

	idr_init(&rproc->notifyids);

	INIT_LIST_HEAD(&rproc->carveouts);
	INIT_LIST_HEAD(&rproc->mappings);
	INIT_LIST_HEAD(&rproc->traces);
	INIT_LIST_HEAD(&rproc->rvdevs);

	INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);
	init_completion(&rproc->crash_comp);

	rproc->state = RPROC_OFFLINE;

	return rproc;
}
EXPORT_SYMBOL(rproc_alloc);
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
/*
 * Drop a reference to the rproc's device; when the last reference goes
 * away, rproc_type_release() frees everything rproc_alloc() set up.
 */
void rproc_put(struct rproc *rproc)
{
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
/**
 * rproc_del() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * Waits for any in-flight asynchronous firmware probe, removes the
 * registered virtio devices, frees the cached resource table, and
 * deletes the device. Pair with rproc_put() to actually free memory.
 *
 * Returns 0 on success, -EINVAL on a NULL handle.
 */
int rproc_del(struct rproc *rproc)
{
	struct rproc_vdev *rvdev, *tmp;

	if (!rproc)
		return -EINVAL;

	/* if rproc is just being registered, wait */
	wait_for_completion(&rproc->firmware_loading_complete);

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node)
		rproc_remove_virtio_dev(rvdev);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);

	device_del(&rproc->dev);

	return 0;
}
EXPORT_SYMBOL(rproc_del);
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
/**
 * rproc_report_crash() - report a crash of a remote processor
 * @rproc: the crashed remote processor
 * @type: the kind of crash detected
 *
 * Safe to call from any context (including IRQ): the actual handling
 * is deferred to a workqueue (rproc_crash_handler_work).
 */
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
{
	if (!rproc) {
		pr_err("NULL rproc pointer\n");
		return;
	}

	dev_err(&rproc->dev, "crash detected in %s: type %s\n",
		rproc->name, rproc_crash_to_string(type));

	/* create a new task to handle the error */
	schedule_work(&rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
1414
/* module init: only sets up the framework's debugfs root */
static int __init remoteproc_init(void)
{
	rproc_init_debugfs();

	return 0;
}
module_init(remoteproc_init);

/* module exit: tear down the debugfs root */
static void __exit remoteproc_exit(void)
{
	rproc_exit_debugfs();
}
module_exit(remoteproc_exit);
1428
1429MODULE_LICENSE("GPL v2");
1430MODULE_DESCRIPTION("Generic Remote Processor Framework");
1431