linux/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r352.h"
#include "hs_ucode.h"

#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>
#include <subdev/pmu.h>
#include <core/msgqueue.h>
#include <engine/sec2.h>

/**
 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
 * @signature:          16B signature for secure code. 0s if no secure code
 * @ctx_dma:            DMA context to be used by BL while loading code/data
 * @code_dma_base:      256B-aligned physical FB address where code is located
 *                      (falcon's $xcbase register)
 * @non_sec_code_off:   offset from code_dma_base where the non-secure code is
 *                      located. Must be a multiple of 256 to help perf
 * @non_sec_code_size:  size of the non-secure code part.
 * @sec_code_off:       offset from code_dma_base where the secure code is
 *                      located. Must be a multiple of 256 to help perf
 * @sec_code_size:      size of the secure code part.
 * @code_entry_point:   code entry point which will be invoked by BL after
 *                      code is loaded.
 * @data_dma_base:      256B-aligned physical FB address where data is located.
 *                      (falcon's $xdbase register)
 * @data_size:          size of the data block. Should be a multiple of 256B
 *
 * Structure used by the bootloader to load the rest of the code. This has
 * to be filled by the host and copied into DMEM at the offset provided in
 * hsflcn_bl_desc.bl_desc_dmem_load_off.
 */
struct acr_r352_flcn_bl_desc {
	u32 reserved[4];
	u32 signature[4];
	u32 ctx_dma;
	u32 code_dma_base;
	u32 non_sec_code_off;
	u32 non_sec_code_size;
	u32 sec_code_off;
	u32 sec_code_size;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
	u32 code_dma_base1;
	u32 data_dma_base1;
};
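
/*
 * Note: code_dma_base1/data_dma_base1 extend the 256B-unit DMA bases beyond
 * 32 bits, i.e. to frame buffer offsets of 1 TiB (2^32 * 256B) and above.
 * They are filled from the upper 32 bits of the shifted addresses below.
 */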

/**
 * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
 */
static void
acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
			       const struct ls_ucode_img *img, u64 wpr_addr,
			       void *_desc)
{
	struct acr_r352_flcn_bl_desc *desc = _desc;
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	u64 base, addr_code, addr_data;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
	addr_data = (base + pdesc->app_resident_data_offset) >> 8;

	desc->ctx_dma = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = lower_32_bits(addr_code);
	desc->code_dma_base1 = upper_32_bits(addr_code);
	desc->non_sec_code_off = pdesc->app_resident_code_offset;
	desc->non_sec_code_size = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = lower_32_bits(addr_data);
	desc->data_dma_base1 = upper_32_bits(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
}
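
/*
 * Worked example of the address math above, with made-up numbers: for
 * wpr_addr = 0x1_0000_0000, ucode_off = 0x2000 and app_start_offset = 0x100,
 * base is 0x1_0000_2100. With app_resident_code_offset = 0, addr_code is
 * base >> 8 = 0x0100_0021, so code_dma_base = 0x0100_0021 and
 * code_dma_base1 = 0x0.
 */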

/**
 * struct hsflcn_acr_desc - data section of the HS firmware
 *
 * This header is to be copied at the beginning of DMEM by the HS bootloader.
 *
 * @signatures:         signatures of the ACR ucode
 * @wpr_region_id:      region ID holding the WPR header and its details
 * @wpr_offset:         offset from the WPR region holding the wpr header
 * @regions:            region descriptors
 * @ucode_blob_size:    size of the LS blob
 * @ucode_blob_base:    FB location of the LS blob
 */
struct hsflcn_acr_desc {
	union {
		u8 reserved_dmem[0x200];
		u32 signatures[4];
	} ucode_reserved_space;
	u32 wpr_region_id;
	u32 wpr_offset;
	u32 mmu_mem_range;
#define FLCN_ACR_MAX_REGIONS 2
	struct {
		u32 no_regions;
		struct {
			u32 start_addr;
			u32 end_addr;
			u32 region_id;
			u32 read_mask;
			u32 write_mask;
			u32 client_mask;
		} region_props[FLCN_ACR_MAX_REGIONS];
	} regions;
	u32 ucode_blob_size;
	u64 ucode_blob_base __aligned(8);
	struct {
		u32 vpr_enabled;
		u32 vpr_start;
		u32 vpr_end;
		u32 hdcp_policies;
	} vpr_desc;
};

/*
 * Low-secure blob creation
 */

/**
 * struct acr_r352_lsf_lsb_header - LS firmware header
 * @signature:          signature to verify the firmware against
 * @ucode_off:          offset of the ucode blob in the WPR region. The ucode
 *                      blob contains the bootloader, code and data of the
 *                      LS falcon
 * @ucode_size:         size of the ucode blob, including bootloader
 * @data_size:          size of the ucode blob data
 * @bl_code_size:       size of the bootloader code
 * @bl_imem_off:        offset in imem of the bootloader
 * @bl_data_off:        offset of the bootloader data in WPR region
 * @bl_data_size:       size of the bootloader data
 * @app_code_off:       offset of the app code relative to ucode_off
 * @app_code_size:      size of the app code
 * @app_data_off:       offset of the app data relative to ucode_off
 * @app_data_size:      size of the app data
 * @flags:              flags for the secure bootloader
 *
 * This structure is written into the WPR region for each managed falcon. Each
 * instance is referenced by the lsb_offset member of the corresponding
 * lsf_wpr_header.
 */
struct acr_r352_lsf_lsb_header {
	/**
	 * LS falcon signatures
	 * @prd_keys:           signature to use in production mode
	 * @dbg_keys:           signature to use in debug mode
	 * @b_prd_present:      whether the production key is present
	 * @b_dbg_present:      whether the debug key is present
	 * @falcon_id:          ID of the falcon the ucode applies to
	 */
	struct {
		u8 prd_keys[2][16];
		u8 dbg_keys[2][16];
		u32 b_prd_present;
		u32 b_dbg_present;
		u32 falcon_id;
	} signature;
	u32 ucode_off;
	u32 ucode_size;
	u32 data_size;
	u32 bl_code_size;
	u32 bl_imem_off;
	u32 bl_data_off;
	u32 bl_data_size;
	u32 app_code_off;
	u32 app_code_size;
	u32 app_data_off;
	u32 app_data_size;
	u32 flags;
};

/**
 * struct acr_r352_lsf_wpr_header - LS blob WPR Header
 * @falcon_id:          LS falcon ID
 * @lsb_offset:         offset of the lsb_lsf_header in the WPR region
 * @bootstrap_owner:    secure falcon responsible for bootstrapping the LS falcon
 * @lazy_bootstrap:     skip bootstrapping by ACR
 * @status:             bootstrapping status
 *
 * An array of these is written at the beginning of the WPR region, one for
 * each managed falcon. The array is terminated by an instance whose falcon_id
 * is LSF_FALCON_ID_INVALID.
 */
struct acr_r352_lsf_wpr_header {
	u32 falcon_id;
	u32 lsb_offset;
	u32 bootstrap_owner;
	u32 lazy_bootstrap;
	u32 status;
#define LSF_IMAGE_STATUS_NONE                           0
#define LSF_IMAGE_STATUS_COPY                           1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED         2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED         3
#define LSF_IMAGE_STATUS_VALIDATION_DONE                4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED             5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY                6
};

/**
 * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
 */
struct ls_ucode_img_r352 {
	struct ls_ucode_img base;

	const struct acr_r352_lsf_func *func;

	struct acr_r352_lsf_wpr_header wpr_header;
	struct acr_r352_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)

/**
 * acr_r352_ls_ucode_img_load() - create an ls_ucode_img and load it
 */
struct ls_ucode_img *
acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
			   const struct nvkm_secboot *sb,
			   enum nvkm_secboot_falcon falcon_id)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
	struct ls_ucode_img_r352 *img;
	int ret;

	img = kzalloc(sizeof(*img), GFP_KERNEL);
	if (!img)
		return ERR_PTR(-ENOMEM);

	img->base.falcon_id = falcon_id;

	ret = func->load(sb, func->version_max, &img->base);
	if (ret < 0) {
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(ret);
	}

	img->func = func->version[ret];

	/* Check that the signature size matches our expectations... */
	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
			   nvkm_secboot_falcon_name[falcon_id]);
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(-EINVAL);
	}

	/* Copy signature to the right place */
	memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);

	/* not needed? the signature should already have the right value */
	img->lsb_header.signature.falcon_id = falcon_id;

	return &img->base;
}

#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096
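
/*
 * Sketch of the WPR layout produced by acr_r352_ls_fill_headers() and
 * acr_r352_ls_img_fill_headers() below, using the alignments defined above:
 *
 *	+--------------------+ offset 0
 *	| WPR headers        | one per managed falcon, plus a terminator
 *	+--------------------+ 256B-aligned
 *	| LSB header         | \
 *	+--------------------+ 4KB-aligned     repeated for each
 *	| ucode image        |  (BL+code+data)  managed falcon
 *	+--------------------+ 256B-aligned
 *	| BL descriptor data | /
 *	+--------------------+
 */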

/**
 * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
 * @acr:        ACR to use
 * @img:        image to generate for
 * @offset:     offset in the WPR region where this image starts
 *
 * Allocate space in the WPR area from offset and write the WPR and LSB headers
 * accordingly.
 *
 * Return: offset at the end of this image.
 */
static u32
acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
			     struct ls_ucode_img_r352 *img, u32 offset)
{
	struct ls_ucode_img *_img = &img->base;
	struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
	struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
	const struct acr_r352_lsf_func *func = img->func;

	/* Fill WPR header */
	whdr->falcon_id = _img->falcon_id;
	whdr->bootstrap_owner = acr->base.boot_falcon;
	whdr->status = LSF_IMAGE_STATUS_COPY;

	/* Skip bootstrapping falcons started by something other than the ACR */
	if (acr->lazy_bootstrap & BIT(_img->falcon_id))
		whdr->lazy_bootstrap = 1;

	/* Align, save off, and include the LSB header size */
	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
	whdr->lsb_offset = offset;
	offset += sizeof(*lhdr);

	/*
	 * Align, save off, and include the original (static) ucode
	 * image size
	 */
	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
	_img->ucode_off = lhdr->ucode_off = offset;
	offset += _img->ucode_size;

	/*
	 * For falcons that use a boot loader (BL), we append a loader
	 * desc structure on the end of the ucode image and consider
	 * this the boot loader data. The host will then copy the loader
	 * desc args to this space within the WPR region (before locking
	 * down) and the HS bin will then copy them to DMEM 0 for the
	 * loader.
	 */
	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
				   LSF_BL_CODE_SIZE_ALIGN);
	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
				lhdr->bl_code_size - lhdr->ucode_size;
	/*
	 * Though the BL is located at the 0th offset of the image, its VA
	 * is different, to make sure that it doesn't collide with the
	 * actual OS VA range
	 */
	lhdr->bl_imem_off = desc->bootloader_imem_offset;
	lhdr->app_code_off = desc->app_start_offset +
			     desc->app_resident_code_offset;
	lhdr->app_code_size = desc->app_resident_code_size;
	lhdr->app_data_off = desc->app_start_offset +
			     desc->app_resident_data_offset;
	lhdr->app_data_size = desc->app_resident_data_size;

	lhdr->flags = func->lhdr_flags;
	if (_img->falcon_id == acr->base.boot_falcon)
		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;

	/* Align and save off the BL descriptor size */
	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

	/*
	 * Align, save off, and include the additional BL data
	 */
	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
	lhdr->bl_data_off = offset;
	offset += lhdr->bl_data_size;

	return offset;
}

/**
 * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
 */
int
acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
	struct ls_ucode_img_r352 *img;
	struct list_head *l;
	u32 count = 0;
	u32 offset;

	/* Count the number of images to manage */
	list_for_each(l, imgs)
		count++;

	/*
	 * Start with an array of WPR headers at the base of the WPR.
	 * The expectation here is that the secure falcon will do a single DMA
	 * read of this array and cache it internally so it's ok to pack these.
	 * Also, we add 1 to the falcon count to indicate the end of the array.
	 */
	offset = sizeof(img->wpr_header) * (count + 1);

	/*
	 * Walk the managed falcons, accounting for the LSB structs
	 * as well as the ucode images.
	 */
	list_for_each_entry(img, imgs, base.node) {
		offset = acr_r352_ls_img_fill_headers(acr, img, offset);
	}

	return offset;
}

/**
 * acr_r352_ls_write_wpr - write the WPR blob contents
 */
int
acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
		      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
	struct ls_ucode_img *_img;
	u32 pos = 0;
	u32 max_desc_size = 0;
	u8 *gdesc;

	/* Figure out how large we need gdesc to be. */
	list_for_each_entry(_img, imgs, node) {
		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
		const struct acr_r352_lsf_func *ls_func = img->func;

		max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
	}

	gdesc = kmalloc(max_desc_size, GFP_KERNEL);
	if (!gdesc)
		return -ENOMEM;

	nvkm_kmap(wpr_blob);

	list_for_each_entry(_img, imgs, node) {
		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
		const struct acr_r352_lsf_func *ls_func = img->func;

		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
				      sizeof(img->wpr_header));

		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
				      &img->lsb_header, sizeof(img->lsb_header));

		/* Generate and write BL descriptor */
		memset(gdesc, 0, ls_func->bl_desc_size);
		ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);

		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
				      gdesc, ls_func->bl_desc_size);

		/* Copy ucode */
		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
				      _img->ucode_data, _img->ucode_size);

		pos += sizeof(img->wpr_header);
	}

	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);

	nvkm_done(wpr_blob);

	kfree(gdesc);

	return 0;
}

/* Both the size and address of the WPR need to be 256K-aligned */
#define WPR_ALIGNMENT	0x40000
/**
 * acr_r352_prepare_ls_blob() - prepare the LS blob
 *
 * For each securely managed falcon, load the FW, signatures and bootloaders and
 * prepare a ucode blob. Then, compute the offsets in the WPR region for each
 * blob, and finally write the headers and ucode blobs into a GPU object that
 * will be copied into the WPR region by the HS firmware.
 */
static int
acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct list_head imgs;
	struct ls_ucode_img *img, *t;
	unsigned long managed_falcons = acr->base.managed_falcons;
	u64 wpr_addr = sb->wpr_addr;
	u32 wpr_size = sb->wpr_size;
	int managed_count = 0;
	u32 image_wpr_size, ls_blob_size;
	int falcon_id;
	int ret;

	INIT_LIST_HEAD(&imgs);

	/* Load all LS blobs */
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		struct ls_ucode_img *img;

		img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
		if (IS_ERR(img)) {
			if (acr->base.optional_falcons & BIT(falcon_id)) {
				managed_falcons &= ~BIT(falcon_id);
				nvkm_info(subdev, "skipping %s falcon...\n",
					  nvkm_secboot_falcon_name[falcon_id]);
				continue;
			}
			ret = PTR_ERR(img);
			goto cleanup;
		}

		list_add_tail(&img->node, &imgs);
		managed_count++;
	}

	/* Commit the actual list of falcons we will manage from now on */
	acr->base.managed_falcons = managed_falcons;

	/*
	 * If the boot falcon has a firmware, let it manage the bootstrap of
	 * the other falcons.
	 */
	if (acr->func->ls_func[acr->base.boot_falcon] &&
	    (managed_falcons & BIT(acr->base.boot_falcon))) {
		for_each_set_bit(falcon_id, &managed_falcons,
				 NVKM_SECBOOT_FALCON_END) {
			if (falcon_id == acr->base.boot_falcon)
				continue;

			acr->lazy_bootstrap |= BIT(falcon_id);
		}
	}

	/*
	 * Fill the WPR and LSF headers with the right offsets and compute the
	 * required WPR size
	 */
	image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
	image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);

	ls_blob_size = image_wpr_size;

	/*
	 * If we need a shadow area, allocate twice the size and use the
	 * upper half as WPR
	 */
	if (wpr_size == 0 && acr->func->shadow_blob)
		ls_blob_size *= 2;

	/* Allocate a GPU object that will contain the WPR region */
	ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
			      false, NULL, &acr->ls_blob);
	if (ret)
		goto cleanup;

	nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
		   managed_count, image_wpr_size);

	/* If WPR address and size are not fixed, set them to fit the LS blob */
	if (wpr_size == 0) {
		wpr_addr = acr->ls_blob->addr;
		if (acr->func->shadow_blob)
			wpr_addr += acr->ls_blob->size / 2;

		wpr_size = image_wpr_size;
	/*
	 * But if the WPR region is set by the bootloader, it is illegal for
	 * the HS blob to be larger than this region.
	 */
	} else if (image_wpr_size > wpr_size) {
		nvkm_error(subdev, "WPR region too small for FW blob!\n");
		nvkm_error(subdev, "required: %dB\n", image_wpr_size);
		nvkm_error(subdev, "available: %dB\n", wpr_size);
		ret = -ENOSPC;
		goto cleanup;
	}

	/* Write the LS blob */
	ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
	if (ret)
		nvkm_gpuobj_del(&acr->ls_blob);

cleanup:
	list_for_each_entry_safe(img, t, &imgs, node) {
		kfree(img->ucode_data);
		kfree(img->sig);
		kfree(img);
	}

	return ret;
}

void
acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
		       void *_desc)
{
	struct hsflcn_acr_desc *desc = _desc;
	struct nvkm_gpuobj *ls_blob = acr->ls_blob;

	/* WPR region information if WPR is not fixed */
	if (sb->wpr_size == 0) {
		u64 wpr_start = ls_blob->addr;
		u64 wpr_end = wpr_start + ls_blob->size;

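		/*
		 * Region bounds are programmed in units of 256 bytes, hence
		 * the >> 8. The mask values below mirror what the proprietary
		 * driver programs; their per-client meaning is not spelled
		 * out here.
		 */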
		desc->wpr_region_id = 1;
		desc->regions.no_regions = 2;
		desc->regions.region_props[0].start_addr = wpr_start >> 8;
		desc->regions.region_props[0].end_addr = wpr_end >> 8;
		desc->regions.region_props[0].region_id = 1;
		desc->regions.region_props[0].read_mask = 0xf;
		desc->regions.region_props[0].write_mask = 0xc;
		desc->regions.region_props[0].client_mask = 0x2;
	} else {
		desc->ucode_blob_base = ls_blob->addr;
		desc->ucode_blob_size = ls_blob->size;
	}
}

static void
acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
			     u64 offset)
{
	struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
	u64 addr_code, addr_data;

	addr_code = offset >> 8;
	addr_data = (offset + hdr->data_dma_base) >> 8;

	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
	bl_desc->code_dma_base = lower_32_bits(addr_code);
	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
	bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
	bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
	bl_desc->code_entry_point = 0;
	bl_desc->data_dma_base = lower_32_bits(addr_data);
	bl_desc->data_size = hdr->data_size;
}
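
/*
 * Note that, unlike the LS descriptors generated above, the HS bootloader
 * uses the virtual DMA context (FALCON_DMAIDX_VIRT) - presumably because the
 * HS blob is a regular GPU object mapped through the falcon's virtual address
 * space rather than data inside the WPR region.
 */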

/**
 * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
 *
 * @acr: ACR to use
 * @sb: secure boot instance to prepare for
 * @fw: name of the HS firmware to load
 * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
 * @load_header: pointer to the load header to fill for this firmware
 * @patch: whether we should patch the HS descriptor (only for HS loaders)
 */
static int
acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
			 const char *fw, struct nvkm_gpuobj **blob,
			 struct hsf_load_header *load_header, bool patch)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	void *acr_image;
	struct fw_bin_header *hsbin_hdr;
	struct hsf_fw_header *fw_hdr;
	struct hsf_load_header *load_hdr;
	void *acr_data;
	int ret;

	acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
	if (IS_ERR(acr_image))
		return PTR_ERR(acr_image);

	hsbin_hdr = acr_image;
	fw_hdr = acr_image + hsbin_hdr->header_offset;
	load_hdr = acr_image + fw_hdr->hdr_offset;
	acr_data = acr_image + hsbin_hdr->data_offset;

	/* Patch descriptor with WPR information? */
	if (patch) {
		struct hsflcn_acr_desc *desc;

		desc = acr_data + load_hdr->data_dma_base;
		acr->func->fixup_hs_desc(acr, sb, desc);
	}

	if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
		nvkm_error(subdev, "more apps (%d) than supported (%d)!",
			   load_hdr->num_apps, ACR_R352_MAX_APPS);
		ret = -EINVAL;
		goto cleanup;
	}
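	/*
	 * Each app entry contributes two u32s to the load header (an offset
	 * and a size, cf. hsf_load_header_app_off()/_app_size()), hence the
	 * factor of 2 in the copy below.
	 */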
	memcpy(load_header, load_hdr, sizeof(*load_header) +
	       (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));

	/* Create the ACR blob and copy the HS data to it */
	ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
			      0x1000, false, NULL, blob);
	if (ret)
		goto cleanup;

	nvkm_kmap(*blob);
	nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
	nvkm_done(*blob);

cleanup:
	kfree(acr_image);

	return ret;
}

/**
 * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
 *
 * This includes the LS blob, HS ucode loading blob, and HS bootloader.
 *
 * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
 */
int
acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	int ret;

	/* Firmware already loaded? */
	if (acr->firmware_ok)
		return 0;

	/* Load and prepare the managed falcons' firmware */
	ret = acr_r352_prepare_ls_blob(acr, sb);
	if (ret)
		return ret;

	/* Load the HS firmware that will load the LS firmwares */
	if (!acr->load_blob) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
					       &acr->load_blob,
					       &acr->load_bl_header, true);
		if (ret)
			return ret;
	}

	/* If the ACR region is dynamically programmed, we need an unload FW */
	if (sb->wpr_size == 0) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
					       &acr->unload_blob,
					       &acr->unload_bl_header, false);
		if (ret)
			return ret;
	}

	/* Load the HS firmware bootloader */
	if (!acr->hsbl_blob) {
		acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
		if (IS_ERR(acr->hsbl_blob)) {
			ret = PTR_ERR(acr->hsbl_blob);
			acr->hsbl_blob = NULL;
			return ret;
		}

		if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
			acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
							    "acr/unload_bl", 0);
			if (IS_ERR(acr->hsbl_unload_blob)) {
				ret = PTR_ERR(acr->hsbl_unload_blob);
				acr->hsbl_unload_blob = NULL;
				return ret;
			}
		} else {
			acr->hsbl_unload_blob = acr->hsbl_blob;
		}
	}

	acr->firmware_ok = true;
	nvkm_debug(&sb->subdev, "LS blob successfully created\n");

	return 0;
}

/**
 * acr_r352_load() - prepare the HS falcon to run the specified blob, mapped
 * at the given GPU address offset.
 *
 * Returns the start address to use, or a negative error value.
 */
static int
acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
	      struct nvkm_gpuobj *blob, u64 offset)
{
	struct acr_r352 *acr = acr_r352(_acr);
	const u32 bl_desc_size = acr->func->hs_bl_desc_size;
	const struct hsf_load_header *load_hdr;
	struct fw_bin_header *bl_hdr;
	struct fw_bl_desc *hsbl_desc;
	void *bl, *blob_data, *hsbl_code, *hsbl_data;
	u32 code_size;
	u8 *bl_desc;

	bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
	if (!bl_desc)
		return -ENOMEM;

	/* Find the bootloader descriptor for our blob and copy it */
	if (blob == acr->load_blob) {
		load_hdr = &acr->load_bl_header;
		bl = acr->hsbl_blob;
	} else if (blob == acr->unload_blob) {
		load_hdr = &acr->unload_bl_header;
		bl = acr->hsbl_unload_blob;
	} else {
		nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
		kfree(bl_desc);
		return -EINVAL;
	}

	bl_hdr = bl;
	hsbl_desc = bl + bl_hdr->header_offset;
	blob_data = bl + bl_hdr->data_offset;
	hsbl_code = blob_data + hsbl_desc->code_off;
	hsbl_data = blob_data + hsbl_desc->data_off;
	code_size = ALIGN(hsbl_desc->code_size, 256);

	/* Copy HS bootloader data */
	nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);

	/* Copy HS bootloader code to the end of IMEM */
	nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
			      code_size, hsbl_desc->start_tag, 0, false);

	/* Generate the BL header */
	acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);

	/* Copy the HS BL header where the HS descriptor expects it to be */
	nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
			      bl_desc_size, 0);

	kfree(bl_desc);
	return hsbl_desc->start_tag << 8;
}
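
/*
 * The value returned above is the bootloader's IMEM tag converted back into
 * a byte address (IMEM tags cover 256-byte pages); the caller uses it as the
 * boot vector for the HS falcon.
 */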

static int
acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	int i;

	/* Run the unload blob to unprotect the WPR region */
	if (acr->unload_blob && sb->wpr_set) {
		int ret;

		nvkm_debug(subdev, "running HS unload blob\n");
		ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
		if (ret < 0)
			return ret;
		/*
		 * The unload blob will return this error code - it is not an
		 * error, and is the expected behavior on RM as well
		 */
		if (ret && ret != 0x1d) {
			nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret);
			return -EINVAL;
		}
		nvkm_debug(subdev, "HS unload blob completed\n");
	}

	for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
		acr->falcon_state[i] = NON_SECURE;

	sb->wpr_set = false;

	return 0;
}

/**
 * acr_r352_wpr_is_set() - check whether the WPR region has indeed been set
 * by the ACR firmware, and matches where it should be.
 */
static bool
acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = &sb->subdev;
	const struct nvkm_device *device = subdev->device;
	u64 wpr_lo, wpr_hi;
	u64 wpr_range_lo, wpr_range_hi;

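	/*
	 * 0x100cd4 appears to be an indexed WPR info register: writing an
	 * index (2 = WPR low bound, 3 = WPR high bound) makes that bound
	 * readable from the same register. The low byte is masked off and
	 * the remainder shifted up into a byte address.
	 */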
	nvkm_wr32(device, 0x100cd4, 0x2);
	wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
	wpr_lo <<= 8;
	nvkm_wr32(device, 0x100cd4, 0x3);
	wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
	wpr_hi <<= 8;

	if (sb->wpr_size != 0) {
		wpr_range_lo = sb->wpr_addr;
		wpr_range_hi = wpr_range_lo + sb->wpr_size;
	} else {
		wpr_range_lo = acr->ls_blob->addr;
		wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
	}

	return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
		wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
}

static int
acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = &sb->subdev;
	unsigned long managed_falcons = acr->base.managed_falcons;
	int falcon_id;
	int ret;

	if (sb->wpr_set)
		return 0;

	/* Make sure all blobs are ready */
	ret = acr_r352_load_blobs(acr, sb);
	if (ret)
		return ret;

	nvkm_debug(subdev, "running HS load blob\n");
	ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
	/* clear halt interrupt */
	nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
	sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret);
		return -EINVAL;
	}
	nvkm_debug(subdev, "HS load blob completed\n");
	/* WPR must be set at this point */
	if (!sb->wpr_set) {
		nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
		return -EINVAL;
	}

	/* Run the LS firmwares' post_run hooks */
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		const struct acr_r352_ls_func *func =
						  acr->func->ls_func[falcon_id];

		if (func->post_run) {
			ret = func->post_run(&acr->base, sb);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/**
 * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
 *
 * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
 * disabled. This has the effect of making all managed falcons ready-to-run.
 */
static int
acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
		     unsigned long falcon_mask)
{
	int falcon;
	int ret;

	/*
	 * Perform secure boot each time we are called on FECS. Since only FECS
	 * and GPCCS are managed and started together, this ought to be safe.
	 */
	if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
		goto end;

	ret = acr_r352_shutdown(acr, sb);
	if (ret)
		return ret;

	ret = acr_r352_bootstrap(acr, sb);
	if (ret)
		return ret;

end:
	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
		acr->falcon_state[falcon] = RESET;
	}
	return 0;
}

/*
 * acr_r352_reset() - execute secure boot from the prepared state
 *
 * Load the HS bootloader and ask the falcon to run it. This will in turn
 * load the HS firmware and run it, so once the falcon stops all the managed
 * falcons should have their LS firmware loaded and be ready to run.
 */
static int
acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
	       unsigned long falcon_mask)
{
	struct acr_r352 *acr = acr_r352(_acr);
	struct nvkm_msgqueue *queue;
	int falcon;
	bool wpr_already_set = sb->wpr_set;
	int ret;

	/* Make sure secure boot is performed */
	ret = acr_r352_bootstrap(acr, sb);
	if (ret)
		return ret;

	/* No PMU interface? */
	if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
		/* Redo secure boot entirely if it was already done */
		if (wpr_already_set)
			return acr_r352_reset_nopmu(acr, sb, falcon_mask);
		/* Else return the result of the initial invocation */
		else
			return ret;
	}

	switch (_acr->boot_falcon) {
	case NVKM_SECBOOT_FALCON_PMU:
		queue = sb->subdev.device->pmu->queue;
		break;
	case NVKM_SECBOOT_FALCON_SEC2:
		queue = sb->subdev.device->sec2->queue;
		break;
	default:
		return -EINVAL;
	}

	/* Otherwise just ask the LS firmware to reset the falcon */
	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
		nvkm_debug(&sb->subdev, "resetting %s falcon\n",
			   nvkm_secboot_falcon_name[falcon]);
	ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
	if (ret) {
		nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
		return ret;
	}
	nvkm_debug(&sb->subdev, "falcon reset done\n");

	return 0;
}

static int
acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
{
	struct acr_r352 *acr = acr_r352(_acr);

	return acr_r352_shutdown(acr, sb);
}

static void
acr_r352_dtor(struct nvkm_acr *_acr)
{
	struct acr_r352 *acr = acr_r352(_acr);

	nvkm_gpuobj_del(&acr->unload_blob);

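	/*
	 * hsbl_unload_blob aliases hsbl_blob when the boot falcon is the PMU
	 * (see acr_r352_load_blobs()), so only free it when it is distinct.
	 */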
	if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
		kfree(acr->hsbl_unload_blob);
	kfree(acr->hsbl_blob);
	nvkm_gpuobj_del(&acr->load_blob);
	nvkm_gpuobj_del(&acr->ls_blob);

	kfree(acr);
}

static const struct acr_r352_lsf_func
acr_r352_ls_fecs_func_0 = {
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r352_ls_fecs_func = {
	.load = acr_ls_ucode_load_fecs,
	.version_max = 0,
	.version = {
		&acr_r352_ls_fecs_func_0,
	}
};

static const struct acr_r352_lsf_func
acr_r352_ls_gpccs_func_0 = {
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
	/* GPCCS will be loaded using PRI */
	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};

const struct acr_r352_ls_func
acr_r352_ls_gpccs_func = {
	.load = acr_ls_ucode_load_gpccs,
	.version_max = 0,
	.version = {
		&acr_r352_ls_gpccs_func_0,
	}
};

/**
 * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
 * @dma_idx:            DMA context to be used by BL while loading code/data
 * @code_dma_base:      256B-aligned physical FB address where code is located
 * @code_size_total:    total size of the code part in the ucode
 * @code_size_to_load:  size of the code part to load in PMU IMEM.
 * @code_entry_point:   entry point in the code.
 * @data_dma_base:      physical FB address where the data part of ucode is located
 * @data_size:          total size of the data portion.
 * @overlay_dma_base:   physical FB address for resident code present in ucode
 * @argc:               total number of args
 * @argv:               offset where args are copied into PMU's DMEM.
 * @code_dma_base1:     upper bits of code_dma_base
 * @data_dma_base1:     upper bits of data_dma_base
 * @overlay_dma_base1:  upper bits of overlay_dma_base
 *
 * Structure used by the PMU bootloader to load the rest of the code
 */
struct acr_r352_pmu_bl_desc {
	u32 dma_idx;
	u32 code_dma_base;
	u32 code_size_total;
	u32 code_size_to_load;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
	u32 overlay_dma_base;
	u32 argc;
	u32 argv;
	u16 code_dma_base1;
	u16 data_dma_base1;
	u16 overlay_dma_base1;
};
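
/*
 * As in struct acr_r352_flcn_bl_desc, the u16 *_dma_base1 fields carry the
 * upper bits of the 256B-unit DMA bases, extending the addressable frame
 * buffer range beyond 1 TiB.
 */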

/**
 * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image
 */
static void
acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
			      const struct ls_ucode_img *img, u64 wpr_addr,
			      void *_desc)
{
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
	struct acr_r352_pmu_bl_desc *desc = _desc;
	u64 base;
	u64 addr_code;
	u64 addr_data;
	u32 addr_args;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
	addr_data = (base + pdesc->app_resident_data_offset) >> 8;
	addr_args = pmu->falcon->data.limit;
	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;

	desc->dma_idx = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = lower_32_bits(addr_code);
	desc->code_dma_base1 = upper_32_bits(addr_code);
	desc->code_size_total = pdesc->app_size;
	desc->code_size_to_load = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = lower_32_bits(addr_data);
	desc->data_dma_base1 = upper_32_bits(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
	desc->overlay_dma_base = lower_32_bits(addr_code);
	desc->overlay_dma_base1 = upper_32_bits(addr_code);
	desc->argc = 1;
	desc->argv = addr_args;
}
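
/*
 * The argument buffer (argv above) sits in the last NVKM_MSGQUEUE_CMDLINE_SIZE
 * bytes of the PMU's DMEM, where the message queue command line is expected
 * to be written before the PMU firmware boots.
 */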

static const struct acr_r352_lsf_func
acr_r352_ls_pmu_func_0 = {
	.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
};

static const struct acr_r352_ls_func
acr_r352_ls_pmu_func = {
	.load = acr_ls_ucode_load_pmu,
	.post_run = acr_ls_pmu_post_run,
	.version_max = 0,
	.version = {
		&acr_r352_ls_pmu_func_0,
	}
};

const struct acr_r352_func
acr_r352_func = {
	.fixup_hs_desc = acr_r352_fixup_hs_desc,
	.generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
	.hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
	.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
	.ls_fill_headers = acr_r352_ls_fill_headers,
	.ls_write_wpr = acr_r352_ls_write_wpr,
	.ls_func = {
		[NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
		[NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
	},
};

static const struct nvkm_acr_func
acr_r352_base_func = {
	.dtor = acr_r352_dtor,
	.fini = acr_r352_fini,
	.load = acr_r352_load,
	.reset = acr_r352_reset,
};

struct nvkm_acr *
acr_r352_new_(const struct acr_r352_func *func,
	      enum nvkm_secboot_falcon boot_falcon,
	      unsigned long managed_falcons)
{
	struct acr_r352 *acr;
	int i;

	/* Check that all requested falcons are supported */
	for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		if (!func->ls_func[i])
			return ERR_PTR(-ENOTSUPP);
	}

	acr = kzalloc(sizeof(*acr), GFP_KERNEL);
	if (!acr)
		return ERR_PTR(-ENOMEM);

	acr->base.boot_falcon = boot_falcon;
	acr->base.managed_falcons = managed_falcons;
	acr->base.func = &acr_r352_base_func;
	acr->func = func;

	return &acr->base;
}

struct nvkm_acr *
acr_r352_new(unsigned long managed_falcons)
{
	return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
			     managed_falcons);
}
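
/*
 * A typical instantiation, as a sketch - the actual call sites live in the
 * per-chip secboot code (e.g. GM200-class GPUs):
 *
 *	acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
 *			   BIT(NVKM_SECBOOT_FALCON_GPCCS));
 */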