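/*
 * ACR (Access Controlled Regions) firmware version r352 support for NVKM
 * secure boot. This code builds the low-secure (LS) firmware blob for the
 * managed falcons, loads the high-secure (HS) ACR binaries, and drives the
 * bootstrap of the LS falcons through the boot falcon (PMU or SEC2).
 */
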
#include "acr_r352.h"
#include "hs_ucode.h"

#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>
#include <subdev/pmu.h>
#include <core/msgqueue.h>
#include <engine/sec2.h>
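/**
 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
 * @signature:		16B signature for secure code. 0s if no secure code
 * @ctx_dma:		DMA context to be used by BL while loading code/data
 * @code_dma_base:	256B-aligned physical FB address where code is located
 *			(falcon's $xcbase register)
 * @non_sec_code_off:	offset from code_dma_base where the non-secure code is
 *			located. The offset must be multiple of 256 to help perf
 * @non_sec_code_size:	the size of the non-secure code part
 * @sec_code_off:	offset from code_dma_base where the secure code is
 *			located. The offset must be multiple of 256 to help perf
 * @sec_code_size:	the size of the secure code part
 * @code_entry_point:	code entry point which will be invoked by BL after
 *			code is loaded
 * @data_dma_base:	256B-aligned physical FB address where data is located
 *			(falcon's $xdbase register)
 * @data_size:		size of data block. Should be multiple of 256B
 * @code_dma_base1:	upper 32 bits of code_dma_base
 * @data_dma_base1:	upper 32 bits of data_dma_base
 *
 * Structure used by the bootloader to load the rest of the code. This has
 * to be filled by the host and copied into DMEM at the offset provided in
 * hsflcn_bl_desc.bl_desc_dmem_load_off.
 */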
struct acr_r352_flcn_bl_desc {
	u32 reserved[4];
	u32 signature[4];
	u32 ctx_dma;
	u32 code_dma_base;
	u32 non_sec_code_off;
	u32 non_sec_code_size;
	u32 sec_code_off;
	u32 sec_code_size;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
	u32 code_dma_base1;
	u32 data_dma_base1;
};
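/**
 * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
 */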
static void
acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
			       const struct ls_ucode_img *img, u64 wpr_addr,
			       void *_desc)
{
	struct acr_r352_flcn_bl_desc *desc = _desc;
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	u64 base, addr_code, addr_data;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
	addr_data = (base + pdesc->app_resident_data_offset) >> 8;

	desc->ctx_dma = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = lower_32_bits(addr_code);
	desc->code_dma_base1 = upper_32_bits(addr_code);
	desc->non_sec_code_off = pdesc->app_resident_code_offset;
	desc->non_sec_code_size = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = lower_32_bits(addr_data);
	desc->data_dma_base1 = upper_32_bits(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
}
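/**
 * struct hsflcn_acr_desc - data section of the HS firmware
 *
 * This header is to be copied at the beginning of DMEM by the HS bootloader.
 *
 * @ucode_reserved_space: signature of the ACR ucode
 * @wpr_region_id:	region ID holding the WPR header and its details
 * @wpr_offset:		offset from the WPR region holding the wpr header
 * @regions:		region descriptors
 * @ucode_blob_size:	size of the LS blob
 * @ucode_blob_base:	FB location of the LS blob
 */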
struct hsflcn_acr_desc {
	union {
		u8 reserved_dmem[0x200];
		u32 signatures[4];
	} ucode_reserved_space;
	u32 wpr_region_id;
	u32 wpr_offset;
	u32 mmu_mem_range;
#define FLCN_ACR_MAX_REGIONS 2
	struct {
		u32 no_regions;
		struct {
			u32 start_addr;
			u32 end_addr;
			u32 region_id;
			u32 read_mask;
			u32 write_mask;
			u32 client_mask;
		} region_props[FLCN_ACR_MAX_REGIONS];
	} regions;
	u32 ucode_blob_size;
	u64 ucode_blob_base __aligned(8);
	struct {
		u32 vpr_enabled;
		u32 vpr_start;
		u32 vpr_end;
		u32 hdcp_policies;
	} vpr_desc;
};
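/**
 * struct acr_r352_lsf_lsb_header - LS firmware header
 * @signature:		signature to verify the firmware against
 * @ucode_off:		offset of the ucode blob in the WPR region. The ucode
 *			blob contains the bootloader, code and data of the
 *			LS falcon
 * @ucode_size:		size of the ucode blob, including bootloader
 * @data_size:		size of the ucode blob data
 * @bl_code_size:	size of the bootloader code
 * @bl_imem_off:	offset in imem of the bootloader
 * @bl_data_off:	offset of the bootloader data in WPR region
 * @bl_data_size:	size of the bootloader data
 * @app_code_off:	offset of the app code relative to ucode_off
 * @app_code_size:	size of the app code
 * @app_data_off:	offset of the app data relative to ucode_off
 * @app_data_size:	size of the app data
 * @flags:		flags for the secure bootloader
 *
 * This structure is written into the WPR region for each managed falcon. Each
 * instance is referenced by the lsb_offset member of the corresponding
 * lsf_wpr_header.
 */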
struct acr_r352_lsf_lsb_header {
	/* Signature to verify the firmware against, in production and
	 * debug variants */
	struct {
		u8 prd_keys[2][16];
		u8 dbg_keys[2][16];
		u32 b_prd_present;
		u32 b_dbg_present;
		u32 falcon_id;
	} signature;
	u32 ucode_off;
	u32 ucode_size;
	u32 data_size;
	u32 bl_code_size;
	u32 bl_imem_off;
	u32 bl_data_off;
	u32 bl_data_size;
	u32 app_code_off;
	u32 app_code_size;
	u32 app_data_off;
	u32 app_data_size;
	u32 flags;
};
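/**
 * struct acr_r352_lsf_wpr_header - LS blob WPR Header
 * @falcon_id:		LS falcon ID
 * @lsb_offset:		offset of the lsb_lsf_header in the WPR region
 * @bootstrap_owner:	secure falcon responsible for bootstrapping the LS falcon
 * @lazy_bootstrap:	skip bootstrapping by ACR
 * @status:		bootstrapping status
 *
 * An array of these is written at the beginning of the WPR region, one for
 * each managed falcon. The array is terminated by an instance whose falcon_id
 * is NVKM_SECBOOT_FALCON_INVALID.
 */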
struct acr_r352_lsf_wpr_header {
	u32 falcon_id;
	u32 lsb_offset;
	u32 bootstrap_owner;
	u32 lazy_bootstrap;
	u32 status;
#define LSF_IMAGE_STATUS_NONE				0
#define LSF_IMAGE_STATUS_COPY				1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED		2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED		3
#define LSF_IMAGE_STATUS_VALIDATION_DONE		4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED		5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY		6
};
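/**
 * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
 */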
struct ls_ucode_img_r352 {
	struct ls_ucode_img base;

	struct acr_r352_lsf_wpr_header wpr_header;
	struct acr_r352_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
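/**
 * acr_r352_ls_ucode_img_load() - create a ls_ucode_img and load its firmware
 */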
struct ls_ucode_img *
acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
			   const struct nvkm_secboot *sb,
			   enum nvkm_secboot_falcon falcon_id)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct ls_ucode_img_r352 *img;
	int ret;

	img = kzalloc(sizeof(*img), GFP_KERNEL);
	if (!img)
		return ERR_PTR(-ENOMEM);

	img->base.falcon_id = falcon_id;

	ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
	if (ret) {
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(ret);
	}

	/* Check that the signature size matches our expectations... */
	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
			   nvkm_secboot_falcon_name[falcon_id]);
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(-EINVAL);
	}

	/* Copy signature to the right place */
	memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);

	/* Make sure the falcon ID in the signature matches this image's */
	img->lsb_header.signature.falcon_id = falcon_id;

	return &img->base;
}

#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096
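/**
 * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
 * @acr:	ACR to use
 * @img:	image to generate for
 * @offset:	offset in the WPR region where this image starts
 *
 * Allocate space in the WPR area from offset and write the WPR and LSB headers
 * accordingly.
 *
 * Return: offset at the end of this image.
 */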
static u32
acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
			     struct ls_ucode_img_r352 *img, u32 offset)
{
	struct ls_ucode_img *_img = &img->base;
	struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
	struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
	const struct acr_r352_ls_func *func =
					    acr->func->ls_func[_img->falcon_id];

	/* Fill WPR header */
	whdr->falcon_id = _img->falcon_id;
	whdr->bootstrap_owner = acr->base.boot_falcon;
	whdr->status = LSF_IMAGE_STATUS_COPY;

	/* Skip bootstrapping falcons started by someone else than ACR */
	if (acr->lazy_bootstrap & BIT(_img->falcon_id))
		whdr->lazy_bootstrap = 1;

	/* Align, save off, and include an LSB header size */
	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
	whdr->lsb_offset = offset;
	offset += sizeof(*lhdr);

	/*
	 * Align, save off, and include the original (static) ucode
	 * image size
	 */
	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
	_img->ucode_off = lhdr->ucode_off = offset;
	offset += _img->ucode_size;

	/*
	 * The bootloader is considered part of the ucode image: ucode_size
	 * covers the aligned resident data offset plus the aligned bootloader
	 * code, and data_size covers whatever remains of the application.
	 */
	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
				   LSF_BL_CODE_SIZE_ALIGN);
	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
			  lhdr->bl_code_size - lhdr->ucode_size;

	/*
	 * Though the BL is located at 0th offset of the image, the VA
	 * is different to make sure that it doesn't collide with the
	 * actual OS VA range
	 */
	lhdr->bl_imem_off = desc->bootloader_imem_offset;
	lhdr->app_code_off = desc->app_start_offset +
			     desc->app_resident_code_offset;
	lhdr->app_code_size = desc->app_resident_code_size;
	lhdr->app_data_off = desc->app_start_offset +
			     desc->app_resident_data_offset;
	lhdr->app_data_size = desc->app_resident_data_size;

	lhdr->flags = func->lhdr_flags;
	if (_img->falcon_id == acr->base.boot_falcon)
		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;

	/* Align and save off BL descriptor size */
	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

	/* Align, save off, and include the additional BL data */
	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
	lhdr->bl_data_off = offset;
	offset += lhdr->bl_data_size;

	return offset;
}
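/**
 * acr_r352_ls_fill_headers - fill the WPR and LSB headers of all managed images
 */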
int
acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
	struct ls_ucode_img_r352 *img;
	struct list_head *l;
	u32 count = 0;
	u32 offset;

	/* Count the number of images to manage */
	list_for_each(l, imgs)
		count++;

	/*
	 * Start with an array of WPR headers at the base of the WPR.
	 * The expectation here is that the secure falcon will do a single DMA
	 * read of this array and cache it internally so it's ok to pack these.
	 * Also, we add 1 to the falcon count to indicate the end of the array.
	 */
	offset = sizeof(img->wpr_header) * (count + 1);

	/*
	 * Walk the managed falcons, accounting for the LSB structs
	 * as well as the ucode images.
	 */
	list_for_each_entry(img, imgs, base.node) {
		offset = acr_r352_ls_img_fill_headers(acr, img, offset);
	}

	return offset;
}
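/**
 * acr_r352_ls_write_wpr - write the WPR blob contents
 */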
int
acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
		      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
	struct ls_ucode_img *_img;
	u32 pos = 0;
	u32 max_desc_size = 0;
	u8 *gdesc;

	/*
	 * Size a single scratch buffer for the largest BL descriptor, instead
	 * of using a variable-length array on the stack.
	 */
	list_for_each_entry(_img, imgs, node) {
		const struct acr_r352_ls_func *ls_func =
					    acr->func->ls_func[_img->falcon_id];

		max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
	}

	gdesc = kmalloc(max_desc_size, GFP_KERNEL);
	if (!gdesc)
		return -ENOMEM;

	nvkm_kmap(wpr_blob);

	list_for_each_entry(_img, imgs, node) {
		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
		const struct acr_r352_ls_func *ls_func =
					    acr->func->ls_func[_img->falcon_id];

		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
				      sizeof(img->wpr_header));

		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
				     &img->lsb_header, sizeof(img->lsb_header));

		/* Generate and write BL descriptor */
		memset(gdesc, 0, ls_func->bl_desc_size);
		ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);

		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
				      gdesc, ls_func->bl_desc_size);

		/* Copy ucode */
		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
				      _img->ucode_data, _img->ucode_size);

		pos += sizeof(img->wpr_header);
	}

	/* Terminate the WPR header array */
	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);

	nvkm_done(wpr_blob);

	kfree(gdesc);

	return 0;
}

#define WPR_ALIGNMENT 0x40000
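/**
 * acr_r352_prepare_ls_blob() - prepare the LS blob
 *
 * For each managed falcon, load the FW, signatures and bootloaders and
 * prepare a ucode blob. Then, compute the offsets in the WPR region for each
 * blob, and finally write the headers and ucode blobs into a GPU object that
 * will be copied into the WPR region by the HS firmware.
 */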
static int
acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct list_head imgs;
	struct ls_ucode_img *img, *t;
	unsigned long managed_falcons = acr->base.managed_falcons;
	u64 wpr_addr = sb->wpr_addr;
	u32 wpr_size = sb->wpr_size;
	int managed_count = 0;
	u32 image_wpr_size, ls_blob_size;
	int falcon_id;
	int ret;

	INIT_LIST_HEAD(&imgs);

	/* Load all LS blobs */
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		struct ls_ucode_img *img;

		img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
		if (IS_ERR(img)) {
			if (acr->base.optional_falcons & BIT(falcon_id)) {
				managed_falcons &= ~BIT(falcon_id);
				nvkm_info(subdev, "skipping %s falcon...\n",
					  nvkm_secboot_falcon_name[falcon_id]);
				continue;
			}
			ret = PTR_ERR(img);
			goto cleanup;
		}

		list_add_tail(&img->node, &imgs);
		managed_count++;
	}

	/* Commit the actual list of falcons we will manage from now on */
	acr->base.managed_falcons = managed_falcons;

	/*
	 * If the boot falcon has a firmware, let it manage the bootstrap of
	 * the other falcons.
	 */
	if (acr->func->ls_func[acr->base.boot_falcon] &&
	    (managed_falcons & BIT(acr->base.boot_falcon))) {
		for_each_set_bit(falcon_id, &managed_falcons,
				 NVKM_SECBOOT_FALCON_END) {
			if (falcon_id == acr->base.boot_falcon)
				continue;

			acr->lazy_bootstrap |= BIT(falcon_id);
		}
	}

	/*
	 * Fill the WPR and LSF headers with the right offsets and compute
	 * required WPR size
	 */
	image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
	image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);

	ls_blob_size = image_wpr_size;

	/*
	 * If we need a shadow area, allocate twice the size and use the
	 * upper half as WPR
	 */
	if (wpr_size == 0 && acr->func->shadow_blob)
		ls_blob_size *= 2;

	/* Allocate GPU object that will contain the WPR region */
	ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
			      false, NULL, &acr->ls_blob);
	if (ret)
		goto cleanup;

	nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
		   managed_count, image_wpr_size);

	/* If WPR address and size are not fixed, set them to fit the LS blob */
	if (wpr_size == 0) {
		wpr_addr = acr->ls_blob->addr;
		if (acr->func->shadow_blob)
			wpr_addr += acr->ls_blob->size / 2;

		wpr_size = image_wpr_size;
	/*
	 * But if the WPR region is set by the bootloader, it is illegal for
	 * the HS blob to be larger than this region.
	 */
	} else if (image_wpr_size > wpr_size) {
		nvkm_error(subdev, "WPR region too small for FW blob!\n");
		nvkm_error(subdev, "required: %dB\n", image_wpr_size);
		nvkm_error(subdev, "available: %dB\n", wpr_size);
		ret = -ENOSPC;
		goto cleanup;
	}

	/* Write LS blob */
	ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
	if (ret)
		nvkm_gpuobj_del(&acr->ls_blob);

cleanup:
	list_for_each_entry_safe(img, t, &imgs, node) {
		kfree(img->ucode_data);
		kfree(img->sig);
		kfree(img);
	}

	return ret;
}
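/**
 * acr_r352_fixup_hs_desc() - patch the HS firmware descriptor with the WPR
 * region and LS blob information before it is copied to DMEM
 */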
void
acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
		       void *_desc)
{
	struct hsflcn_acr_desc *desc = _desc;
	struct nvkm_gpuobj *ls_blob = acr->ls_blob;

	/* WPR region information if WPR is not fixed */
	if (sb->wpr_size == 0) {
		u64 wpr_start = ls_blob->addr;
		u64 wpr_end = wpr_start + ls_blob->size;

		desc->wpr_region_id = 1;
		desc->regions.no_regions = 2;
		desc->regions.region_props[0].start_addr = wpr_start >> 8;
		desc->regions.region_props[0].end_addr = wpr_end >> 8;
		desc->regions.region_props[0].region_id = 1;
		desc->regions.region_props[0].read_mask = 0xf;
		desc->regions.region_props[0].write_mask = 0xc;
		desc->regions.region_props[0].client_mask = 0x2;
	} else {
		desc->ucode_blob_base = ls_blob->addr;
		desc->ucode_blob_size = ls_blob->size;
	}
}

static void
acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
			     u64 offset)
{
	struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
	u64 addr_code, addr_data;

	addr_code = offset >> 8;
	addr_data = (offset + hdr->data_dma_base) >> 8;

	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
	bl_desc->code_dma_base = lower_32_bits(addr_code);
	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
	bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
	bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
	bl_desc->code_entry_point = 0;
	bl_desc->data_dma_base = lower_32_bits(addr_data);
	bl_desc->data_size = hdr->data_size;
}
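/**
 * acr_r352_prepare_hs_blob - load and prepare a HS blob and its load header
 * @acr:	ACR to use
 * @sb:		secure boot instance to prepare for
 * @fw:		name of the HS firmware to load
 * @blob:	pointer to gpuobj that will be allocated to receive the HS FW
 *		payload
 * @load_header: where to copy the HS firmware's load header
 * @patch:	whether we should patch the HS descriptor (only for HS loaders)
 */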
static int
acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
			 const char *fw, struct nvkm_gpuobj **blob,
			 struct hsf_load_header *load_header, bool patch)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	void *acr_image;
	struct fw_bin_header *hsbin_hdr;
	struct hsf_fw_header *fw_hdr;
	struct hsf_load_header *load_hdr;
	void *acr_data;
	int ret;

	acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
	if (IS_ERR(acr_image))
		return PTR_ERR(acr_image);

	hsbin_hdr = acr_image;
	fw_hdr = acr_image + hsbin_hdr->header_offset;
	load_hdr = acr_image + fw_hdr->hdr_offset;
	acr_data = acr_image + hsbin_hdr->data_offset;

	/* Patch descriptor with WPR information */
	if (patch) {
		struct hsflcn_acr_desc *desc;

		desc = acr_data + load_hdr->data_dma_base;
		acr->func->fixup_hs_desc(acr, sb, desc);
	}

	if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
		nvkm_error(subdev, "more apps (%d) than supported (%d)!",
			   load_hdr->num_apps, ACR_R352_MAX_APPS);
		ret = -EINVAL;
		goto cleanup;
	}
	memcpy(load_header, load_hdr, sizeof(*load_header) +
	       (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));

	/* Create ACR blob and copy HS data to it */
	ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
			      0x1000, false, NULL, blob);
	if (ret)
		goto cleanup;

	nvkm_kmap(*blob);
	nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
	nvkm_done(*blob);

cleanup:
	kfree(acr_image);

	return ret;
}
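/**
 * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
 *
 * This includes the LS blob, HS ucode loading blob, and HS bootloader.
 *
 * The HS ucode unload blob is only used if the WPR region is resized by ACR.
 */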
int
acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	int ret;

	/* Firmware already loaded? */
	if (acr->firmware_ok)
		return 0;

	/* Load and prepare the managed falcons' firmwares */
	ret = acr_r352_prepare_ls_blob(acr, sb);
	if (ret)
		return ret;

	/* Load the HS firmware that will load the LS firmwares */
	if (!acr->load_blob) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
					       &acr->load_blob,
					       &acr->load_bl_header, true);
		if (ret)
			return ret;
	}

	/* If the ACR region is dynamically programmed, we need an unload FW */
	if (sb->wpr_size == 0) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
					       &acr->unload_blob,
					       &acr->unload_bl_header, false);
		if (ret)
			return ret;
	}

	/* Load the HS firmware bootloader */
	if (!acr->hsbl_blob) {
		acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
		if (IS_ERR(acr->hsbl_blob)) {
			ret = PTR_ERR(acr->hsbl_blob);
			acr->hsbl_blob = NULL;
			return ret;
		}

		if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
			acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
							    "acr/unload_bl", 0);
			if (IS_ERR(acr->hsbl_unload_blob)) {
				ret = PTR_ERR(acr->hsbl_unload_blob);
				acr->hsbl_unload_blob = NULL;
				return ret;
			}
		} else {
			acr->hsbl_unload_blob = acr->hsbl_blob;
		}
	}

	acr->firmware_ok = true;
	nvkm_debug(&sb->subdev, "LS blob successfully created\n");

	return 0;
}
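/**
 * acr_r352_load() - prepare HS falcon to run the specified blob, mapped
 * at GPU address offset
 */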
static int
acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
	      struct nvkm_gpuobj *blob, u64 offset)
{
	struct acr_r352 *acr = acr_r352(_acr);
	const u32 bl_desc_size = acr->func->hs_bl_desc_size;
	const struct hsf_load_header *load_hdr;
	struct fw_bin_header *bl_hdr;
	struct fw_bl_desc *hsbl_desc;
	void *bl, *blob_data, *hsbl_code, *hsbl_data;
	u32 code_size;
	u8 *bl_desc;

	/* Allocate a zeroed BL descriptor instead of a variable-length array */
	bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
	if (!bl_desc)
		return -ENOMEM;

	/* Find the bootloader descriptor for our blob and copy it */
	if (blob == acr->load_blob) {
		load_hdr = &acr->load_bl_header;
		bl = acr->hsbl_blob;
	} else if (blob == acr->unload_blob) {
		load_hdr = &acr->unload_bl_header;
		bl = acr->hsbl_unload_blob;
	} else {
		nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
		kfree(bl_desc);
		return -EINVAL;
	}

	bl_hdr = bl;
	hsbl_desc = bl + bl_hdr->header_offset;
	blob_data = bl + bl_hdr->data_offset;
	hsbl_code = blob_data + hsbl_desc->code_off;
	hsbl_data = blob_data + hsbl_desc->data_off;
	code_size = ALIGN(hsbl_desc->code_size, 256);

	/* Copy HS bootloader data */
	nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);

	/* Copy HS bootloader code to end of IMEM */
	nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
			      code_size, hsbl_desc->start_tag, 0, false);

	/* Generate the BL header */
	acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);

	/* Copy HS BL header where the HS descriptor expects it to be */
	nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
			      bl_desc_size, 0);

	kfree(bl_desc);
	return hsbl_desc->start_tag << 8;
}

static int
acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	int i;

	/* Run the unload blob to unprotect the WPR region */
	if (acr->unload_blob && sb->wpr_set) {
		int ret;

		nvkm_debug(subdev, "running HS unload blob\n");
		ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
		if (ret < 0)
			return ret;
		/*
		 * Unload blob will return this error code - it is not an error
		 * and is the expected behavior on RM as well
		 */
		if (ret && ret != 0x1d) {
			nvkm_error(subdev, "HS unload failed, ret 0x%08x", ret);
			return -EINVAL;
		}
		nvkm_debug(subdev, "HS unload blob completed\n");
	}

	for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
		acr->falcon_state[i] = NON_SECURE;

	sb->wpr_set = false;

	return 0;
}
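/**
 * acr_r352_wpr_is_set - check whether the WPR region is set
 *
 * Verify that the WPR region has indeed been set by the ACR firmware, and
 * that it covers the range we expect it to.
 */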
static bool
acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = &sb->subdev;
	const struct nvkm_device *device = subdev->device;
	u64 wpr_lo, wpr_hi;
	u64 wpr_range_lo, wpr_range_hi;

	/* Read the programmed WPR start and end addresses back from the MMU */
	nvkm_wr32(device, 0x100cd4, 0x2);
	wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
	wpr_lo <<= 8;
	nvkm_wr32(device, 0x100cd4, 0x3);
	wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
	wpr_hi <<= 8;

	if (sb->wpr_size != 0) {
		wpr_range_lo = sb->wpr_addr;
		wpr_range_hi = wpr_range_lo + sb->wpr_size;
	} else {
		wpr_range_lo = acr->ls_blob->addr;
		wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
	}

	return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
		wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
}

static int
acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = &sb->subdev;
	unsigned long managed_falcons = acr->base.managed_falcons;
	int falcon_id;
	int ret;

	if (sb->wpr_set)
		return 0;

	/* Make sure all blobs are ready */
	ret = acr_r352_load_blobs(acr, sb);
	if (ret)
		return ret;

	nvkm_debug(subdev, "running HS load blob\n");
	ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
	/* Clear halt interrupt */
	nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
	sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		nvkm_error(subdev, "HS load failed, ret 0x%08x", ret);
		return -EINVAL;
	}
	nvkm_debug(subdev, "HS load blob completed\n");

	/* WPR must be set at this point */
	if (!sb->wpr_set) {
		nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
		return -EINVAL;
	}

	/* Run LS firmwares' post_run hooks */
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		const struct acr_r352_ls_func *func =
						  acr->func->ls_func[falcon_id];

		if (func->post_run) {
			ret = func->post_run(&acr->base, sb);
			if (ret)
				return ret;
		}
	}

	return 0;
}
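/**
 * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
 *
 * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
 * disabled. This has the effect of making all managed falcons ready-to-run.
 */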
static int
acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
		     unsigned long falcon_mask)
{
	int falcon;
	int ret;

	/*
	 * Perform secure boot each time we are called on FECS. Since only FECS
	 * and GPCCS are managed and started together, this ought to be safe.
	 */
	if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
		goto end;

	ret = acr_r352_shutdown(acr, sb);
	if (ret)
		return ret;

	ret = acr_r352_bootstrap(acr, sb);
	if (ret)
		return ret;

end:
	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
		acr->falcon_state[falcon] = RESET;
	}
	return 0;
}
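/*
 * acr_r352_reset() - execute secure boot from the prepared state
 *
 * Load the HS bootloader and ask the falcon to run it. This will in turn
 * load the HS firmware and run it, so once the falcon stops all the managed
 * falcons should have their LS firmware loaded and be ready to run.
 */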
static int
acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
	       unsigned long falcon_mask)
{
	struct acr_r352 *acr = acr_r352(_acr);
	struct nvkm_msgqueue *queue;
	int falcon;
	bool wpr_already_set = sb->wpr_set;
	int ret;

	/* Make sure secure boot is performed */
	ret = acr_r352_bootstrap(acr, sb);
	if (ret)
		return ret;

	/* No PMU interface? */
	if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
		/* Redo secure boot entirely if it was already done */
		if (wpr_already_set)
			return acr_r352_reset_nopmu(acr, sb, falcon_mask);
		/* Else return the result of the initial invocation */
		else
			return ret;
	}

	switch (_acr->boot_falcon) {
	case NVKM_SECBOOT_FALCON_PMU:
		queue = sb->subdev.device->pmu->queue;
		break;
	case NVKM_SECBOOT_FALCON_SEC2:
		queue = sb->subdev.device->sec2->queue;
		break;
	default:
		return -EINVAL;
	}

	/* Otherwise just ask the LS firmware to reset the falcon */
	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
		nvkm_debug(&sb->subdev, "resetting %s falcon\n",
			   nvkm_secboot_falcon_name[falcon]);
	ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
	if (ret) {
		nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
		return ret;
	}
	nvkm_debug(&sb->subdev, "falcon reset done\n");

	return 0;
}

static int
acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
{
	struct acr_r352 *acr = acr_r352(_acr);

	return acr_r352_shutdown(acr, sb);
}

static void
acr_r352_dtor(struct nvkm_acr *_acr)
{
	struct acr_r352 *acr = acr_r352(_acr);

	nvkm_gpuobj_del(&acr->unload_blob);

	/* hsbl_unload_blob aliases hsbl_blob when the PMU is the boot falcon */
	if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
		kfree(acr->hsbl_unload_blob);
	kfree(acr->hsbl_blob);
	nvkm_gpuobj_del(&acr->load_blob);
	nvkm_gpuobj_del(&acr->ls_blob);

	kfree(acr);
}

const struct acr_r352_ls_func
acr_r352_ls_fecs_func = {
	.load = acr_ls_ucode_load_fecs,
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r352_ls_gpccs_func = {
	.load = acr_ls_ucode_load_gpccs,
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
	/* GPCCS will be loaded using PRI */
	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
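/**
 * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
 * @dma_idx:		DMA context to be used by BL while loading code/data
 * @code_dma_base:	256B-aligned physical FB address where code is located
 * @code_size_total:	total size of the code part in the ucode
 * @code_size_to_load:	size of the code part to load in PMU IMEM
 * @code_entry_point:	entry point in the code
 * @data_dma_base:	physical FB address where the data part of ucode is
 *			located
 * @data_size:		total size of the data portion
 * @overlay_dma_base:	physical FB address for resident code present in ucode
 * @argc:		total number of args
 * @argv:		offset where args are copied into PMU's DMEM
 *
 * Structure used by the PMU bootloader to load the rest of the code.
 */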
struct acr_r352_pmu_bl_desc {
	u32 dma_idx;
	u32 code_dma_base;
	u32 code_size_total;
	u32 code_size_to_load;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
	u32 overlay_dma_base;
	u32 argc;
	u32 argv;
	u16 code_dma_base1;
	u16 data_dma_base1;
	u16 overlay_dma_base1;
};
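/**
 * acr_r352_generate_pmu_bl_desc - populate a BL descriptor for a PMU LS image
 */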
static void
acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
			      const struct ls_ucode_img *img, u64 wpr_addr,
			      void *_desc)
{
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
	struct acr_r352_pmu_bl_desc *desc = _desc;
	u64 base;
	u64 addr_code;
	u64 addr_data;
	u32 addr_args;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
	addr_data = (base + pdesc->app_resident_data_offset) >> 8;
	/* Command-line arguments are stored at the end of the PMU's DMEM */
	addr_args = pmu->falcon->data.limit;
	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;

	desc->dma_idx = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = lower_32_bits(addr_code);
	desc->code_dma_base1 = upper_32_bits(addr_code);
	desc->code_size_total = pdesc->app_size;
	desc->code_size_to_load = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = lower_32_bits(addr_data);
	desc->data_dma_base1 = upper_32_bits(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
	desc->overlay_dma_base = lower_32_bits(addr_code);
	desc->overlay_dma_base1 = upper_32_bits(addr_code);
	desc->argc = 1;
	desc->argv = addr_args;
}
1132
1133static const struct acr_r352_ls_func
1134acr_r352_ls_pmu_func = {
1135 .load = acr_ls_ucode_load_pmu,
1136 .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
1137 .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
1138 .post_run = acr_ls_pmu_post_run,
1139};
1140
1141const struct acr_r352_func
1142acr_r352_func = {
1143 .fixup_hs_desc = acr_r352_fixup_hs_desc,
1144 .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
1145 .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
1146 .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
1147 .ls_fill_headers = acr_r352_ls_fill_headers,
1148 .ls_write_wpr = acr_r352_ls_write_wpr,
1149 .ls_func = {
1150 [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
1151 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
1152 [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
1153 },
1154};
1155
1156static const struct nvkm_acr_func
1157acr_r352_base_func = {
1158 .dtor = acr_r352_dtor,
1159 .fini = acr_r352_fini,
1160 .load = acr_r352_load,
1161 .reset = acr_r352_reset,
1162};
1163
1164struct nvkm_acr *
1165acr_r352_new_(const struct acr_r352_func *func,
1166 enum nvkm_secboot_falcon boot_falcon,
1167 unsigned long managed_falcons)
1168{
1169 struct acr_r352 *acr;
1170 int i;
1171
1172
1173 for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
1174 if (!func->ls_func[i])
1175 return ERR_PTR(-ENOTSUPP);
1176 }
1177
1178 acr = kzalloc(sizeof(*acr), GFP_KERNEL);
1179 if (!acr)
1180 return ERR_PTR(-ENOMEM);
1181
1182 acr->base.boot_falcon = boot_falcon;
1183 acr->base.managed_falcons = managed_falcons;
1184 acr->base.func = &acr_r352_base_func;
1185 acr->func = func;
1186
1187 return &acr->base;
1188}
1189
1190struct nvkm_acr *
1191acr_r352_new(unsigned long managed_falcons)
1192{
1193 return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
1194 managed_falcons);
1195}
1196