/* linux/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c */
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r352.h"
#include "hs_ucode.h"

#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>
#include <subdev/pmu.h>
#include <core/msgqueue.h>
#include <engine/sec2.h>
/**
 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
 * @signature:		16B signature for secure code. 0s if no secure code
 * @ctx_dma:		DMA context to be used by BL while loading code/data
 * @code_dma_base:	256B-aligned physical FB address where code is located
 *			(falcon's $xcbase register)
 * @non_sec_code_off:	offset from code_dma_base where the non-secure code is
 *			located. Must be a multiple of 256 to help perf
 * @non_sec_code_size:	size of the non-secure code part
 * @sec_code_off:	offset from code_dma_base where the secure code is
 *			located. Must be a multiple of 256 to help perf
 * @sec_code_size:	size of the secure code part
 * @code_entry_point:	code entry point which will be invoked by BL after
 *			code is loaded
 * @data_dma_base:	256B-aligned physical FB address where data is located
 *			(falcon's $xdbase register)
 * @data_size:		size of data block. Should be a multiple of 256B
 *
 * Structure used by the bootloader to load the rest of the code. This has
 * to be filled by host and copied into DMEM at the offset provided in
 * hsflcn_bl_desc.bl_desc_dmem_load_off.
 */
struct acr_r352_flcn_bl_desc {
	u32 reserved[4];
	u32 signature[4];
	u32 ctx_dma;
	u32 code_dma_base;
	u32 non_sec_code_off;
	u32 non_sec_code_size;
	u32 sec_code_off;
	u32 sec_code_size;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
	u32 code_dma_base1;
	u32 data_dma_base1;
};

/**
 * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
 */
static void
acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
			       const struct ls_ucode_img *img, u64 wpr_addr,
			       void *_desc)
{
	struct acr_r352_flcn_bl_desc *desc = _desc;
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	u64 base, addr_code, addr_data;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
	addr_data = (base + pdesc->app_resident_data_offset) >> 8;

	desc->ctx_dma = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = lower_32_bits(addr_code);
	desc->code_dma_base1 = upper_32_bits(addr_code);
	desc->non_sec_code_off = pdesc->app_resident_code_offset;
	desc->non_sec_code_size = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = lower_32_bits(addr_data);
	desc->data_dma_base1 = upper_32_bits(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
}
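
/*
 * Editor's illustration (hypothetical helper, not used by the driver): the
 * falcon DMA engine addresses the framebuffer in 256-byte blocks, which is
 * why the byte address computed above is shifted right by 8 before being
 * split across the code_dma_base/code_dma_base1 descriptor pair.
 */
static inline void __maybe_unused
acr_r352_split_dma_addr_example(u64 fb_byte_addr, u32 *lo, u32 *hi)
{
	u64 blocks = fb_byte_addr >> 8;	/* byte address -> 256B blocks */

	*lo = lower_32_bits(blocks);	/* -> desc->code_dma_base */
	*hi = upper_32_bits(blocks);	/* -> desc->code_dma_base1 */
}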

/**
 * struct hsflcn_acr_desc - data section of the HS firmware
 *
 * This header is to be copied at the beginning of DMEM by the HS bootloader.
 *
 * @signature:		signature of ACR ucode
 * @wpr_region_id:	region ID holding the WPR header and its details
 * @wpr_offset:		offset from the WPR region holding the wpr header
 * @regions:		region descriptors
 * @ucode_blob_size:	size of the LS blob
 * @ucode_blob_base:	FB address of the LS blob
 */
struct hsflcn_acr_desc {
	union {
		u8 reserved_dmem[0x200];
		u32 signatures[4];
	} ucode_reserved_space;
	u32 wpr_region_id;
	u32 wpr_offset;
	u32 mmu_mem_range;
#define FLCN_ACR_MAX_REGIONS 2
	struct {
		u32 no_regions;
		struct {
			u32 start_addr;
			u32 end_addr;
			u32 region_id;
			u32 read_mask;
			u32 write_mask;
			u32 client_mask;
		} region_props[FLCN_ACR_MAX_REGIONS];
	} regions;
	u32 ucode_blob_size;
	u64 ucode_blob_base __aligned(8);
	struct {
		u32 vpr_enabled;
		u32 vpr_start;
		u32 vpr_end;
		u32 hdcp_policies;
	} vpr_desc;
};

/*
 * Low-secure blob creation
 */

/**
 * struct acr_r352_lsf_lsb_header - LS firmware header
 * @signature:		signature to verify the firmware against
 * @ucode_off:		offset of the ucode blob in the WPR region. The ucode
 *			blob contains the bootloader, code and data of the
 *			LS falcon
 * @ucode_size:		size of the ucode blob, including bootloader
 * @data_size:		size of the ucode blob data
 * @bl_code_size:	size of the bootloader code
 * @bl_imem_off:	offset in IMEM of the bootloader
 * @bl_data_off:	offset of the bootloader data in WPR region
 * @bl_data_size:	size of the bootloader data
 * @app_code_off:	offset of the app code relative to ucode_off
 * @app_code_size:	size of the app code
 * @app_data_off:	offset of the app data relative to ucode_off
 * @app_data_size:	size of the app data
 * @flags:		flags for the secure bootloader
 *
 * This structure is written into the WPR region for each managed falcon. Each
 * instance is referenced by the lsb_offset member of the corresponding
 * lsf_wpr_header.
 */
struct acr_r352_lsf_lsb_header {
	/**
	 * LS falcon signatures
	 * @prd_keys:		signature to use in production mode
	 * @dbg_keys:		signature to use in debug mode
	 * @b_prd_present:	whether the production key is present
	 * @b_dbg_present:	whether the debug key is present
	 * @falcon_id:		ID of the falcon the ucode applies to
	 */
	struct {
		u8 prd_keys[2][16];
		u8 dbg_keys[2][16];
		u32 b_prd_present;
		u32 b_dbg_present;
		u32 falcon_id;
	} signature;
	u32 ucode_off;
	u32 ucode_size;
	u32 data_size;
	u32 bl_code_size;
	u32 bl_imem_off;
	u32 bl_data_off;
	u32 bl_data_size;
	u32 app_code_off;
	u32 app_code_size;
	u32 app_data_off;
	u32 app_data_size;
	u32 flags;
};

/**
 * struct acr_r352_lsf_wpr_header - LS blob WPR Header
 * @falcon_id:		LS falcon ID
 * @lsb_offset:		offset of the lsb_lsf_header in the WPR region
 * @bootstrap_owner:	secure falcon responsible for bootstrapping the LS falcon
 * @lazy_bootstrap:	skip bootstrapping by ACR
 * @status:		bootstrapping status
 *
 * An array of these is written at the beginning of the WPR region, one for
 * each managed falcon. The array is terminated by an instance whose falcon_id
 * is LSF_FALCON_ID_INVALID.
 */
struct acr_r352_lsf_wpr_header {
	u32 falcon_id;
	u32 lsb_offset;
	u32 bootstrap_owner;
	u32 lazy_bootstrap;
	u32 status;
#define LSF_IMAGE_STATUS_NONE				0
#define LSF_IMAGE_STATUS_COPY				1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED		2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED		3
#define LSF_IMAGE_STATUS_VALIDATION_DONE		4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED		5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY		6
};
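
/*
 * Editor's sketch (hypothetical helper, not used by the driver): the secure
 * falcon consumes the WPR headers as a packed array and stops at the first
 * entry whose falcon_id is the invalid marker, which
 * acr_r352_ls_write_wpr() below writes after the last real entry.
 */
static inline int __maybe_unused
acr_r352_count_wpr_headers_example(const struct acr_r352_lsf_wpr_header *whdr)
{
	int n = 0;

	while (whdr[n].falcon_id != NVKM_SECBOOT_FALCON_INVALID)
		n++;

	return n;	/* number of managed falcons described */
}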

/**
 * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
 */
struct ls_ucode_img_r352 {
	struct ls_ucode_img base;

	struct acr_r352_lsf_wpr_header wpr_header;
	struct acr_r352_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)

/**
 * acr_r352_ls_ucode_img_load() - create an ls_ucode_img and load it
 */
struct ls_ucode_img *
acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
			   const struct nvkm_secboot *sb,
			   enum nvkm_secboot_falcon falcon_id)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct ls_ucode_img_r352 *img;
	int ret;

	img = kzalloc(sizeof(*img), GFP_KERNEL);
	if (!img)
		return ERR_PTR(-ENOMEM);

	img->base.falcon_id = falcon_id;

	ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
	if (ret) {
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(ret);
	}

	/* Check that the signature size matches our expectations... */
	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
			   nvkm_secboot_falcon_name[falcon_id]);
		/* don't leak the image on the error path */
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(-EINVAL);
	}

	/* Copy signature to the right place */
	memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);

	/* not needed? the signature should already have the right value */
	img->lsb_header.signature.falcon_id = falcon_id;

	return &img->base;
}

#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096

/**
 * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
 * @acr:	ACR to use
 * @img:	image to generate for
 * @offset:	offset in the WPR region where this image starts
 *
 * Allocate space in the WPR area from @offset and write the WPR and LSB
 * headers accordingly.
 *
 * Return: offset at the end of this image.
 */
static u32
acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
			     struct ls_ucode_img_r352 *img, u32 offset)
{
	struct ls_ucode_img *_img = &img->base;
	struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
	struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
	const struct acr_r352_ls_func *func =
					    acr->func->ls_func[_img->falcon_id];

	/* Fill WPR header */
	whdr->falcon_id = _img->falcon_id;
	whdr->bootstrap_owner = acr->base.boot_falcon;
	whdr->status = LSF_IMAGE_STATUS_COPY;

	/* Skip bootstrapping falcons started by someone other than the ACR */
	if (acr->lazy_bootstrap & BIT(_img->falcon_id))
		whdr->lazy_bootstrap = 1;

	/* Align, save off, and include an LSB header size */
	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
	whdr->lsb_offset = offset;
	offset += sizeof(*lhdr);

	/*
	 * Align, save off, and include the original (static) ucode
	 * image size
	 */
	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
	_img->ucode_off = lhdr->ucode_off = offset;
	offset += _img->ucode_size;

	/*
	 * For falcons that use a boot loader (BL), we append a loader
	 * desc structure on the end of the ucode image and consider
	 * this the boot loader data. The host will then copy the loader
	 * desc args to this space within the WPR region (before locking
	 * down) and the HS bin will then copy them to DMEM 0 for the
	 * loader.
	 */
	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
				   LSF_BL_CODE_SIZE_ALIGN);
	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
				lhdr->bl_code_size - lhdr->ucode_size;
	/*
	 * Though the BL is located at the 0th offset of the image, its VA
	 * is different to make sure that it doesn't collide with the actual
	 * OS VA range
	 */
	lhdr->bl_imem_off = desc->bootloader_imem_offset;
	lhdr->app_code_off = desc->app_start_offset +
			     desc->app_resident_code_offset;
	lhdr->app_code_size = desc->app_resident_code_size;
	lhdr->app_data_off = desc->app_start_offset +
			     desc->app_resident_data_offset;
	lhdr->app_data_size = desc->app_resident_data_size;

	lhdr->flags = func->lhdr_flags;
	if (_img->falcon_id == acr->base.boot_falcon)
		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;

	/* Align and save off BL descriptor size */
	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

	/* Align, save off, and include the additional BL data */
	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
	lhdr->bl_data_off = offset;
	offset += lhdr->bl_data_size;

	return offset;
}
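
/*
 * Editor's illustration of the per-image layout produced above (hypothetical
 * helper, not used by the driver): each image consumes a 256B-aligned LSB
 * header, then a 4096B-aligned ucode blob, then a 256B-aligned BL data
 * block, e.g. offset 0x200 -> LSB header at 0x200, ucode at 0x1000.
 */
static inline u32 __maybe_unused
acr_r352_ls_img_layout_example(u32 offset, u32 ucode_size, u32 bl_desc_size)
{
	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);	/* LSB header */
	offset += sizeof(struct acr_r352_lsf_lsb_header);
	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);	/* ucode image */
	offset += ucode_size;
	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);	/* BL descriptor data */
	offset += ALIGN(bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

	return offset;	/* where the next image may start */
}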

/**
 * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
 */
int
acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
	struct ls_ucode_img_r352 *img;
	struct list_head *l;
	u32 count = 0;
	u32 offset;

	/* Count the number of images to manage */
	list_for_each(l, imgs)
		count++;

	/*
	 * Start with an array of WPR headers at the base of the WPR.
	 * The expectation here is that the secure falcon will do a single DMA
	 * read of this array and cache it internally so it's ok to pack these.
	 * Also, we add 1 to the falcon count to indicate the end of the array.
	 */
	offset = sizeof(img->wpr_header) * (count + 1);

	/*
	 * Walk the managed falcons, accounting for the LSB structs
	 * as well as the ucode images.
	 */
	list_for_each_entry(img, imgs, base.node) {
		offset = acr_r352_ls_img_fill_headers(acr, img, offset);
	}

	return offset;
}

/**
 * acr_r352_ls_write_wpr - write the WPR blob contents
 */
int
acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
		      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
	struct ls_ucode_img *_img;
	u32 pos = 0;

	nvkm_kmap(wpr_blob);

	list_for_each_entry(_img, imgs, node) {
		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
		const struct acr_r352_ls_func *ls_func =
					    acr->func->ls_func[_img->falcon_id];
		u8 gdesc[ls_func->bl_desc_size];

		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
				      sizeof(img->wpr_header));

		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
				      &img->lsb_header, sizeof(img->lsb_header));

		/* Generate and write BL descriptor */
		memset(gdesc, 0, ls_func->bl_desc_size);
		ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);

		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
				      gdesc, ls_func->bl_desc_size);

		/* Copy ucode */
		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
				      _img->ucode_data, _img->ucode_size);

		pos += sizeof(img->wpr_header);
	}

	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);

	nvkm_done(wpr_blob);

	return 0;
}

/* Both the size and address of the WPR region need to be 256K-aligned */
#define WPR_ALIGNMENT	0x40000
/**
 * acr_r352_prepare_ls_blob() - prepare the LS blob
 *
 * For each securely managed falcon, load the FW, signatures and bootloaders and
 * prepare a ucode blob. Then, compute the offsets in the WPR region for each
 * blob, and finally write the headers and ucode blobs into a GPU object that
 * will be copied into the WPR region by the HS firmware.
 */
static int
acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct list_head imgs;
	struct ls_ucode_img *img, *t;
	unsigned long managed_falcons = acr->base.managed_falcons;
	u64 wpr_addr = sb->wpr_addr;
	u32 wpr_size = sb->wpr_size;
	int managed_count = 0;
	u32 image_wpr_size, ls_blob_size;
	int falcon_id;
	int ret;

	INIT_LIST_HEAD(&imgs);

	/* Load all LS blobs */
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		struct ls_ucode_img *img;

		img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
		if (IS_ERR(img)) {
			if (acr->base.optional_falcons & BIT(falcon_id)) {
				managed_falcons &= ~BIT(falcon_id);
				nvkm_info(subdev, "skipping %s falcon...\n",
					  nvkm_secboot_falcon_name[falcon_id]);
				continue;
			}
			ret = PTR_ERR(img);
			goto cleanup;
		}

		list_add_tail(&img->node, &imgs);
		managed_count++;
	}

	/* Commit the actual list of falcons we will manage from now on */
	acr->base.managed_falcons = managed_falcons;

	/*
	 * If the boot falcon has a firmware, let it manage the bootstrap of
	 * other falcons.
	 */
	if (acr->func->ls_func[acr->base.boot_falcon] &&
	    (managed_falcons & BIT(acr->base.boot_falcon))) {
		for_each_set_bit(falcon_id, &managed_falcons,
				 NVKM_SECBOOT_FALCON_END) {
			if (falcon_id == acr->base.boot_falcon)
				continue;

			acr->lazy_bootstrap |= BIT(falcon_id);
		}
	}

	/*
	 * Fill the WPR and LSF headers with the right offsets and compute the
	 * required WPR size
	 */
	image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
	image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);

	ls_blob_size = image_wpr_size;

	/*
	 * If we need a shadow area, allocate twice the size and use the
	 * upper half as WPR
	 */
	if (wpr_size == 0 && acr->func->shadow_blob)
		ls_blob_size *= 2;

	/* Allocate GPU object that will contain the WPR region */
	ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
			      false, NULL, &acr->ls_blob);
	if (ret)
		goto cleanup;

	nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
		   managed_count, image_wpr_size);

	/* If WPR address and size are not fixed, set them to fit the LS blob */
	if (wpr_size == 0) {
		wpr_addr = acr->ls_blob->addr;
		if (acr->func->shadow_blob)
			wpr_addr += acr->ls_blob->size / 2;

		wpr_size = image_wpr_size;
	/*
	 * But if the WPR region is set by the bootloader, it is illegal for
	 * the HS blob to be larger than this region.
	 */
	} else if (image_wpr_size > wpr_size) {
		nvkm_error(subdev, "WPR region too small for FW blob!\n");
		nvkm_error(subdev, "required: %dB\n", image_wpr_size);
		nvkm_error(subdev, "available: %dB\n", wpr_size);
		ret = -ENOSPC;
		goto cleanup;
	}

	/* Write LS blob */
	ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
	if (ret)
		nvkm_gpuobj_del(&acr->ls_blob);

cleanup:
	list_for_each_entry_safe(img, t, &imgs, node) {
		kfree(img->ucode_data);
		kfree(img->sig);
		kfree(img);
	}

	return ret;
}
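
/*
 * Editor's sketch of the shadow-area addressing used above (hypothetical
 * helper, not used by the driver): when the WPR region is not fixed and a
 * shadow blob is requested, the LS blob is allocated at twice the image
 * size and only its upper half serves as the WPR region.
 */
static inline u64 __maybe_unused
acr_r352_effective_wpr_addr_example(u64 blob_addr, u64 blob_size,
				    bool shadow_blob)
{
	return shadow_blob ? blob_addr + blob_size / 2 : blob_addr;
}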

void
acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
		       void *_desc)
{
	struct hsflcn_acr_desc *desc = _desc;
	struct nvkm_gpuobj *ls_blob = acr->ls_blob;

	/* WPR region information if WPR is not fixed */
	if (sb->wpr_size == 0) {
		u64 wpr_start = ls_blob->addr;
		u64 wpr_end = wpr_start + ls_blob->size;

		desc->wpr_region_id = 1;
		desc->regions.no_regions = 2;
		desc->regions.region_props[0].start_addr = wpr_start >> 8;
		desc->regions.region_props[0].end_addr = wpr_end >> 8;
		desc->regions.region_props[0].region_id = 1;
		desc->regions.region_props[0].read_mask = 0xf;
		desc->regions.region_props[0].write_mask = 0xc;
		desc->regions.region_props[0].client_mask = 0x2;
	} else {
		desc->ucode_blob_base = ls_blob->addr;
		desc->ucode_blob_size = ls_blob->size;
	}
}
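
/*
 * Editor's illustration (hypothetical helper, not used by the driver):
 * region_props addresses are stored in 256-byte units, so decoding one
 * back to a byte address is a left shift by 8, mirroring the ">> 8" above.
 */
static inline u64 __maybe_unused
acr_r352_region_start_example(const struct hsflcn_acr_desc *desc, int i)
{
	return (u64)desc->regions.region_props[i].start_addr << 8;
}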

static void
acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
			     u64 offset)
{
	struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
	u64 addr_code, addr_data;

	addr_code = offset >> 8;
	addr_data = (offset + hdr->data_dma_base) >> 8;

	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
	bl_desc->code_dma_base = lower_32_bits(addr_code);
	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
	bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
	bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
	bl_desc->code_entry_point = 0;
	bl_desc->data_dma_base = lower_32_bits(addr_data);
	bl_desc->data_size = hdr->data_size;
}

/**
 * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
 *
 * @acr:	ACR to use
 * @sb:		secure boot instance to prepare for
 * @fw:		name of the HS firmware to load
 * @blob:	pointer to gpuobj that will be allocated to receive the HS FW
 *		payload
 * @load_header: load header to fill with the one copied from the firmware
 * @patch:	whether we should patch the HS descriptor (only for HS loaders)
 */
static int
acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
			 const char *fw, struct nvkm_gpuobj **blob,
			 struct hsf_load_header *load_header, bool patch)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	void *acr_image;
	struct fw_bin_header *hsbin_hdr;
	struct hsf_fw_header *fw_hdr;
	struct hsf_load_header *load_hdr;
	void *acr_data;
	int ret;

	acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
	if (IS_ERR(acr_image))
		return PTR_ERR(acr_image);

	hsbin_hdr = acr_image;
	fw_hdr = acr_image + hsbin_hdr->header_offset;
	load_hdr = acr_image + fw_hdr->hdr_offset;
	acr_data = acr_image + hsbin_hdr->data_offset;

	/* Patch descriptor with WPR information? */
	if (patch) {
		struct hsflcn_acr_desc *desc;

		desc = acr_data + load_hdr->data_dma_base;
		acr->func->fixup_hs_desc(acr, sb, desc);
	}

	if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
		nvkm_error(subdev, "more apps (%d) than supported (%d)!",
			   load_hdr->num_apps, ACR_R352_MAX_APPS);
		ret = -EINVAL;
		goto cleanup;
	}
	memcpy(load_header, load_hdr, sizeof(*load_header) +
			  (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));

	/* Create ACR blob and copy HS data to it */
	ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
			      0x1000, false, NULL, blob);
	if (ret)
		goto cleanup;

	nvkm_kmap(*blob);
	nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
	nvkm_done(*blob);

cleanup:
	kfree(acr_image);

	return ret;
}
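
/*
 * Editor's note as code (hypothetical helper, not used by the driver,
 * assuming the apps array holds num_apps offsets followed by num_apps
 * sizes): the variable-length copy above grabs the fixed load header plus
 * two u32 arrays, hence the 2 * num_apps scaling factor.
 */
static inline size_t __maybe_unused
acr_r352_load_header_size_example(const struct hsf_load_header *hdr)
{
	return sizeof(*hdr) + sizeof(hdr->apps[0]) * 2 * hdr->num_apps;
}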

/**
 * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
 *
 * This includes the LS blob, HS ucode loading blob, and HS bootloader.
 *
 * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
 */
int
acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	int ret;

	/* Firmware already loaded? */
	if (acr->firmware_ok)
		return 0;

	/* Load and prepare the managed falcons' firmwares */
	ret = acr_r352_prepare_ls_blob(acr, sb);
	if (ret)
		return ret;

	/* Load the HS firmware that will load the LS firmwares */
	if (!acr->load_blob) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
					       &acr->load_blob,
					       &acr->load_bl_header, true);
		if (ret)
			return ret;
	}

	/* If the ACR region is dynamically programmed, we need an unload FW */
	if (sb->wpr_size == 0) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
					       &acr->unload_blob,
					       &acr->unload_bl_header, false);
		if (ret)
			return ret;
	}

	/* Load the HS firmware bootloader */
	if (!acr->hsbl_blob) {
		acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
		if (IS_ERR(acr->hsbl_blob)) {
			ret = PTR_ERR(acr->hsbl_blob);
			acr->hsbl_blob = NULL;
			return ret;
		}

		if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
			acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
							    "acr/unload_bl", 0);
			if (IS_ERR(acr->hsbl_unload_blob)) {
				ret = PTR_ERR(acr->hsbl_unload_blob);
				acr->hsbl_unload_blob = NULL;
				return ret;
			}
		} else {
			acr->hsbl_unload_blob = acr->hsbl_blob;
		}
	}

	acr->firmware_ok = true;
	nvkm_debug(&sb->subdev, "LS blob successfully created\n");

	return 0;
}

/**
 * acr_r352_load() - prepare HS falcon to run the specified blob, mapped
 * at the given GPU address offset.
 *
 * Returns the start address to use, or a negative error value.
 */
static int
acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
	      struct nvkm_gpuobj *blob, u64 offset)
{
	struct acr_r352 *acr = acr_r352(_acr);
	const u32 bl_desc_size = acr->func->hs_bl_desc_size;
	const struct hsf_load_header *load_hdr;
	struct fw_bin_header *bl_hdr;
	struct fw_bl_desc *hsbl_desc;
	void *bl, *blob_data, *hsbl_code, *hsbl_data;
	u32 code_size;
	u8 bl_desc[bl_desc_size];

	/* Find the bootloader descriptor for our blob and copy it */
	if (blob == acr->load_blob) {
		load_hdr = &acr->load_bl_header;
		bl = acr->hsbl_blob;
	} else if (blob == acr->unload_blob) {
		load_hdr = &acr->unload_bl_header;
		bl = acr->hsbl_unload_blob;
	} else {
		nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
		return -EINVAL;
	}

	bl_hdr = bl;
	hsbl_desc = bl + bl_hdr->header_offset;
	blob_data = bl + bl_hdr->data_offset;
	hsbl_code = blob_data + hsbl_desc->code_off;
	hsbl_data = blob_data + hsbl_desc->data_off;
	code_size = ALIGN(hsbl_desc->code_size, 256);

	/* Copy HS bootloader data */
	nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);

	/* Copy HS bootloader code to end of IMEM */
	nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
			      code_size, hsbl_desc->start_tag, 0, false);

	/* Generate the BL header */
	memset(bl_desc, 0, bl_desc_size);
	acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);

	/* Copy HS BL header where the HS descriptor expects it to be */
	nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
			      bl_desc_size, 0);

	return hsbl_desc->start_tag << 8;
}
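
/*
 * Editor's note as code (hypothetical helper, not used by the driver):
 * falcon IMEM is tagged in 256-byte pages, so the boot vector returned by
 * acr_r352_load() is simply the bootloader's start tag converted back into
 * a byte address.
 */
static inline u32 __maybe_unused
acr_r352_boot_vector_example(u32 start_tag)
{
	return start_tag << 8;	/* IMEM tag -> byte address */
}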

static int
acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	int i;

	/* Run the unload blob to unprotect the WPR region */
	if (acr->unload_blob && sb->wpr_set) {
		int ret;

		nvkm_debug(subdev, "running HS unload blob\n");
		ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
		if (ret < 0)
			return ret;
		/*
		 * The unload blob is expected to exit with code 0x1d; RM
		 * treats it the same way, so it is not an error.
		 */
		if (ret && ret != 0x1d) {
			nvkm_error(subdev, "HS unload failed, ret 0x%08x", ret);
			return -EINVAL;
		}
		nvkm_debug(subdev, "HS unload blob completed\n");
	}

	for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
		acr->falcon_state[i] = NON_SECURE;

	sb->wpr_set = false;

	return 0;
}

/**
 * acr_r352_wpr_is_set() - check whether the WPR region has indeed been set
 * by the ACR firmware and matches where it should be.
 */
static bool
acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = &sb->subdev;
	const struct nvkm_device *device = subdev->device;
	u64 wpr_lo, wpr_hi;
	u64 wpr_range_lo, wpr_range_hi;

	nvkm_wr32(device, 0x100cd4, 0x2);
	wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
	wpr_lo <<= 8;
	nvkm_wr32(device, 0x100cd4, 0x3);
	wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
	wpr_hi <<= 8;

	if (sb->wpr_size != 0) {
		wpr_range_lo = sb->wpr_addr;
		wpr_range_hi = wpr_range_lo + sb->wpr_size;
	} else {
		wpr_range_lo = acr->ls_blob->addr;
		wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
	}

	return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
		wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
}
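
/*
 * Editor's sketch of the readback decoding above (hypothetical helper, not
 * used by the driver): the driver treats the value read back from 0x100cd4
 * as a WPR bound in 256-byte units, so the low byte is masked off and the
 * rest shifted up into a byte address.
 */
static inline u64 __maybe_unused
acr_r352_wpr_bound_example(u32 reg_val)
{
	return ((u64)reg_val & ~0xffULL) << 8;
}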

static int
acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = &sb->subdev;
	unsigned long managed_falcons = acr->base.managed_falcons;
	int falcon_id;
	int ret;

	if (sb->wpr_set)
		return 0;

	/* Make sure all blobs are ready */
	ret = acr_r352_load_blobs(acr, sb);
	if (ret)
		return ret;

	nvkm_debug(subdev, "running HS load blob\n");
	ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
	/* clear halt interrupt */
	nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
	sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		nvkm_error(subdev, "HS load failed, ret 0x%08x", ret);
		return -EINVAL;
	}
	nvkm_debug(subdev, "HS load blob completed\n");
	/* WPR must be set at this point */
	if (!sb->wpr_set) {
		nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
		return -EINVAL;
	}

	/* Run the LS firmwares' post_run hooks */
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		const struct acr_r352_ls_func *func =
						  acr->func->ls_func[falcon_id];

		if (func->post_run) {
			ret = func->post_run(&acr->base, sb);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/**
 * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
 *
 * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
 * disabled. This has the effect of making all managed falcons ready-to-run.
 */
static int
acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
		     unsigned long falcon_mask)
{
	int falcon;
	int ret;

	/*
	 * Perform secure boot each time we are called on FECS. Since only FECS
	 * and GPCCS are managed and started together, this ought to be safe.
	 */
	if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
		goto end;

	ret = acr_r352_shutdown(acr, sb);
	if (ret)
		return ret;

	ret = acr_r352_bootstrap(acr, sb);
	if (ret)
		return ret;

end:
	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
		acr->falcon_state[falcon] = RESET;
	}
	return 0;
}

/*
 * acr_r352_reset() - execute secure boot from the prepared state
 *
 * Load the HS bootloader and ask the falcon to run it. This will in turn
 * load the HS firmware and run it, so once the falcon stops all the managed
 * falcons should have their LS firmware loaded and be ready to run.
 */
static int
acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
	       unsigned long falcon_mask)
{
	struct acr_r352 *acr = acr_r352(_acr);
	struct nvkm_msgqueue *queue;
	int falcon;
	bool wpr_already_set = sb->wpr_set;
	int ret;

	/* Make sure secure boot is performed */
	ret = acr_r352_bootstrap(acr, sb);
	if (ret)
		return ret;

	/* No PMU interface? */
	if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
		/* Redo secure boot entirely if it was already done */
		if (wpr_already_set)
			return acr_r352_reset_nopmu(acr, sb, falcon_mask);
		/* Else return the result of the initial invocation */
		else
			return ret;
	}

	switch (_acr->boot_falcon) {
	case NVKM_SECBOOT_FALCON_PMU:
		queue = sb->subdev.device->pmu->queue;
		break;
	case NVKM_SECBOOT_FALCON_SEC2:
		queue = sb->subdev.device->sec2->queue;
		break;
	default:
		return -EINVAL;
	}

	/* Otherwise just ask the LS firmware to reset the falcon */
	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
		nvkm_debug(&sb->subdev, "resetting %s falcon\n",
			   nvkm_secboot_falcon_name[falcon]);
	ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
	if (ret) {
		nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
		return ret;
	}
	nvkm_debug(&sb->subdev, "falcon reset done\n");

	return 0;
}

static int
acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
{
	struct acr_r352 *acr = acr_r352(_acr);

	return acr_r352_shutdown(acr, sb);
}

static void
acr_r352_dtor(struct nvkm_acr *_acr)
{
	struct acr_r352 *acr = acr_r352(_acr);

	nvkm_gpuobj_del(&acr->unload_blob);

	if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
		kfree(acr->hsbl_unload_blob);
	kfree(acr->hsbl_blob);
	nvkm_gpuobj_del(&acr->load_blob);
	nvkm_gpuobj_del(&acr->ls_blob);

	kfree(acr);
}

const struct acr_r352_ls_func
acr_r352_ls_fecs_func = {
	.load = acr_ls_ucode_load_fecs,
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r352_ls_gpccs_func = {
	.load = acr_ls_ucode_load_gpccs,
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
	/* GPCCS will be loaded using PRI */
	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};

/**
 * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
 * @dma_idx:		DMA context to be used by BL while loading code/data
 * @code_dma_base:	256B-aligned physical FB address where code is located
 * @code_size_total:	total size of the code part in the ucode
 * @code_size_to_load:	size of the code part to load into PMU IMEM
 * @code_entry_point:	entry point in the code
 * @data_dma_base:	physical FB address where the data part of the ucode is
 *			located
 * @data_size:		total size of the data portion
 * @overlay_dma_base:	physical FB address of resident code present in ucode
 * @argc:		total number of args
 * @argv:		offset where args are copied into PMU's DMEM
 *
 * Structure used by the PMU bootloader to load the rest of the code
 */
struct acr_r352_pmu_bl_desc {
	u32 dma_idx;
	u32 code_dma_base;
	u32 code_size_total;
	u32 code_size_to_load;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
	u32 overlay_dma_base;
	u32 argc;
	u32 argv;
	u16 code_dma_base1;
	u16 data_dma_base1;
	u16 overlay_dma_base1;
};

/**
 * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image
 */
static void
acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
			      const struct ls_ucode_img *img, u64 wpr_addr,
			      void *_desc)
{
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
	struct acr_r352_pmu_bl_desc *desc = _desc;
	u64 base;
	u64 addr_code;
	u64 addr_data;
	u32 addr_args;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
	addr_data = (base + pdesc->app_resident_data_offset) >> 8;
	addr_args = pmu->falcon->data.limit;
	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;

	desc->dma_idx = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = lower_32_bits(addr_code);
	desc->code_dma_base1 = upper_32_bits(addr_code);
	desc->code_size_total = pdesc->app_size;
	desc->code_size_to_load = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = lower_32_bits(addr_data);
	desc->data_dma_base1 = upper_32_bits(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
	desc->overlay_dma_base = lower_32_bits(addr_code);
	desc->overlay_dma_base1 = upper_32_bits(addr_code);
	desc->argc = 1;
	desc->argv = addr_args;
}
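
/*
 * Editor's illustration (hypothetical helper, not used by the driver): the
 * PMU command line is placed at the very top of the falcon's DMEM, so argv
 * points at the DMEM limit minus the message-queue command line size.
 */
static inline u32 __maybe_unused
acr_r352_pmu_args_addr_example(u32 dmem_limit)
{
	return dmem_limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
}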

static const struct acr_r352_ls_func
acr_r352_ls_pmu_func = {
	.load = acr_ls_ucode_load_pmu,
	.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
	.post_run = acr_ls_pmu_post_run,
};

const struct acr_r352_func
acr_r352_func = {
	.fixup_hs_desc = acr_r352_fixup_hs_desc,
	.generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
	.hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
	.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
	.ls_fill_headers = acr_r352_ls_fill_headers,
	.ls_write_wpr = acr_r352_ls_write_wpr,
	.ls_func = {
		[NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
		[NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
	},
};

static const struct nvkm_acr_func
acr_r352_base_func = {
	.dtor = acr_r352_dtor,
	.fini = acr_r352_fini,
	.load = acr_r352_load,
	.reset = acr_r352_reset,
};

struct nvkm_acr *
acr_r352_new_(const struct acr_r352_func *func,
	      enum nvkm_secboot_falcon boot_falcon,
	      unsigned long managed_falcons)
{
	struct acr_r352 *acr;
	int i;

	/* Check that all requested falcons are supported */
	for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		if (!func->ls_func[i])
			return ERR_PTR(-ENOTSUPP);
	}

	acr = kzalloc(sizeof(*acr), GFP_KERNEL);
	if (!acr)
		return ERR_PTR(-ENOMEM);

	acr->base.boot_falcon = boot_falcon;
	acr->base.managed_falcons = managed_falcons;
	acr->base.func = &acr_r352_base_func;
	acr->func = func;

	return &acr->base;
}

struct nvkm_acr *
acr_r352_new(unsigned long managed_falcons)
{
	return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
			     managed_falcons);
}
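
/*
 * Editor's usage sketch (hypothetical caller, not part of the driver): a
 * chip implementation constructs an r352 ACR with a bitmask of the falcons
 * it wants securely managed, e.g. FECS and GPCCS.
 */
static struct nvkm_acr * __maybe_unused
acr_r352_new_example(void)
{
	return acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
			    BIT(NVKM_SECBOOT_FALCON_GPCCS));
}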