linux/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r367.h"
#include "acr_r361.h"

#include <core/gpuobj.h>

/*
 * r367 ACR: new LS signature format requires a rewrite of LS firmware and
 * blob creation functions. Also the hsflcn_desc layout has changed slightly.
 */

#define LSF_LSB_DEPMAP_SIZE 11

/**
 * struct acr_r367_lsf_lsb_header - LS firmware header
 *
 * See also struct acr_r352_lsf_lsb_header for documentation.
 */
struct acr_r367_lsf_lsb_header {
        /**
         * LS falcon signatures
         * @prd_keys:           signature to use in production mode
         * @dbg_keys:           signature to use in debug mode
         * @b_prd_present:      whether the production key is present
         * @b_dbg_present:      whether the debug key is present
         * @falcon_id:          ID of the falcon the ucode applies to
         */
        struct {
                u8 prd_keys[2][16];
                u8 dbg_keys[2][16];
                u32 b_prd_present;
                u32 b_dbg_present;
                u32 falcon_id;
                u32 supports_versioning;
                u32 version;
                u32 depmap_count;
                u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
                u8 kdf[16];
        } signature;
        u32 ucode_off;
        u32 ucode_size;
        u32 data_size;
        u32 bl_code_size;
        u32 bl_imem_off;
        u32 bl_data_off;
        u32 bl_data_size;
        u32 app_code_off;
        u32 app_code_size;
        u32 app_data_off;
        u32 app_data_size;
        u32 flags;
};
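
/*
 * With LSF_LSB_DEPMAP_SIZE == 11 the signature block above is 192 bytes
 * (all members are naturally aligned, so no padding is inserted).
 * acr_r367_ls_ucode_img_load() below rejects LS firmware whose signature
 * blob does not match this size exactly.
 */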

/**
 * struct acr_r367_lsf_wpr_header - LS blob WPR Header
 *
 * See also struct acr_r352_lsf_wpr_header for documentation.
 */
struct acr_r367_lsf_wpr_header {
        u32 falcon_id;
        u32 lsb_offset;
        u32 bootstrap_owner;
        u32 lazy_bootstrap;
        u32 bin_version;
        u32 status;
#define LSF_IMAGE_STATUS_NONE                           0
#define LSF_IMAGE_STATUS_COPY                           1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED         2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED         3
#define LSF_IMAGE_STATUS_VALIDATION_DONE                4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED             5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY                6
#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED        7
};

/**
 * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
 */
struct ls_ucode_img_r367 {
        struct ls_ucode_img base;

        struct acr_r367_lsf_wpr_header wpr_header;
        struct acr_r367_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)

struct ls_ucode_img *
acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
                           const struct nvkm_secboot *sb,
                           enum nvkm_secboot_falcon falcon_id)
{
        const struct nvkm_subdev *subdev = acr->base.subdev;
        struct ls_ucode_img_r367 *img;
        int ret;

        img = kzalloc(sizeof(*img), GFP_KERNEL);
        if (!img)
                return ERR_PTR(-ENOMEM);

        img->base.falcon_id = falcon_id;

        ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
        if (ret) {
                kfree(img->base.ucode_data);
                kfree(img->base.sig);
                kfree(img);
                return ERR_PTR(ret);
        }

        /* Check that the signature size matches our expectations... */
        if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
                nvkm_error(subdev, "invalid signature size for %s falcon!\n",
                           nvkm_secboot_falcon_name[falcon_id]);
                kfree(img->base.ucode_data);
                kfree(img->base.sig);
                kfree(img);
                return ERR_PTR(-EINVAL);
        }

        /* Copy signature to the right place */
        memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);

        /* not needed? the signature should already have the right value */
        img->lsb_header.signature.falcon_id = falcon_id;

        return &img->base;
}

#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096

static u32
acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
                             struct ls_ucode_img_r367 *img, u32 offset)
{
        struct ls_ucode_img *_img = &img->base;
        struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
        struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
        struct ls_ucode_img_desc *desc = &_img->ucode_desc;
        const struct acr_r352_ls_func *func =
                                            acr->func->ls_func[_img->falcon_id];

        /* Fill WPR header */
        whdr->falcon_id = _img->falcon_id;
        whdr->bootstrap_owner = acr->base.boot_falcon;
        whdr->bin_version = lhdr->signature.version;
        whdr->status = LSF_IMAGE_STATUS_COPY;

        /* Skip bootstrapping falcons started by someone other than the ACR */
        if (acr->lazy_bootstrap & BIT(_img->falcon_id))
                whdr->lazy_bootstrap = 1;

        /* Align, save off, and include an LSB header size */
        offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
        whdr->lsb_offset = offset;
        offset += sizeof(*lhdr);

        /*
         * Align, save off, and include the original (static) ucode
         * image size
         */
        offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
        _img->ucode_off = lhdr->ucode_off = offset;
        offset += _img->ucode_size;

        /*
         * For falcons that use a boot loader (BL), we append a loader
         * desc structure on the end of the ucode image and consider
         * this the boot loader data. The host will then copy the loader
         * desc args to this space within the WPR region (before locking
         * down) and the HS bin will then copy them to DMEM 0 for the
         * loader.
         */
        lhdr->bl_code_size = ALIGN(desc->bootloader_size,
                                   LSF_BL_CODE_SIZE_ALIGN);
        lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
                                 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
        lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
                                lhdr->bl_code_size - lhdr->ucode_size;
        /*
         * Though the BL is located at offset 0 of the image, its VA
         * is made different to make sure that it doesn't collide with
         * the actual OS VA range
         */
        lhdr->bl_imem_off = desc->bootloader_imem_offset;
        lhdr->app_code_off = desc->app_start_offset +
                             desc->app_resident_code_offset;
        lhdr->app_code_size = desc->app_resident_code_size;
        lhdr->app_data_off = desc->app_start_offset +
                             desc->app_resident_data_offset;
        lhdr->app_data_size = desc->app_resident_data_size;

        lhdr->flags = func->lhdr_flags;
        if (_img->falcon_id == acr->base.boot_falcon)
                lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;

        /* Align and save off BL descriptor size */
        lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

        /*
         * Align, save off, and include the additional BL data
         */
        offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
        lhdr->bl_data_off = offset;
        offset += lhdr->bl_data_size;

        return offset;
}

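/*
 * For reference, the per-image layout computed above (offsets relative to
 * the start of the WPR blob, alignments from the LSF_*_ALIGN constants):
 *
 *   ALIGN(256)   LSB header (struct acr_r367_lsf_lsb_header)
 *   ALIGN(4096)  LS ucode image (ucode_size bytes, as loaded by
 *                acr_r367_ls_ucode_img_load())
 *   ALIGN(256)   BL descriptor data (bl_data_size bytes, generated at
 *                write time in acr_r367_ls_write_wpr())
 */
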
int
acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
        struct ls_ucode_img_r367 *img;
        struct list_head *l;
        u32 count = 0;
        u32 offset;

        /* Count the number of images to manage */
        list_for_each(l, imgs)
                count++;

        /*
         * Start with an array of WPR headers at the base of the WPR.
         * The expectation here is that the secure falcon will do a single DMA
         * read of this array and cache it internally so it's ok to pack these.
         * Also, we add 1 to the falcon count to indicate the end of the array.
         */
        offset = sizeof(img->wpr_header) * (count + 1);

        /*
         * Walk the managed falcons, accounting for the LSB structs
         * as well as the ucode images.
         */
        list_for_each_entry(img, imgs, base.node) {
                offset = acr_r367_ls_img_fill_headers(acr, img, offset);
        }

        return offset;
}

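/*
 * The resulting WPR blob thus begins with (count + 1) packed WPR headers;
 * the extra entry acts as an end-of-array marker (its falcon_id is set to
 * NVKM_SECBOOT_FALCON_INVALID in acr_r367_ls_write_wpr() below), and the
 * per-falcon LSB header/ucode/BL data ranges follow.
 */
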
int
acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
                      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
        struct ls_ucode_img *_img;
        u32 pos = 0;

        nvkm_kmap(wpr_blob);

        list_for_each_entry(_img, imgs, node) {
                struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
                const struct acr_r352_ls_func *ls_func =
                                            acr->func->ls_func[_img->falcon_id];
                u8 gdesc[ls_func->bl_desc_size];

                nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
                                      sizeof(img->wpr_header));

                nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
                                     &img->lsb_header, sizeof(img->lsb_header));

                /* Generate and write BL descriptor */
                memset(gdesc, 0, ls_func->bl_desc_size);
                ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);

                nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
                                      gdesc, ls_func->bl_desc_size);

                /* Copy ucode */
                nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
                                      _img->ucode_data, _img->ucode_size);

                pos += sizeof(img->wpr_header);
        }

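        /* Terminate the WPR header array with an invalid falcon ID */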
        nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);

        nvkm_done(wpr_blob);

        return 0;
}

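/**
 * struct acr_r367_hsflcn_desc - data section descriptor of the HS firmware
 *
 * Filled by acr_r367_fixup_hs_desc() below with the location of the WPR
 * region (or of the ucode blob, if the WPR region is fixed) that the ACR
 * HS falcon is expected to set up and use.
 */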
struct acr_r367_hsflcn_desc {
        u8 reserved_dmem[0x200];
        u32 signatures[4];
        u32 wpr_region_id;
        u32 wpr_offset;
        u32 mmu_memory_range;
#define FLCN_ACR_MAX_REGIONS 2
        struct {
                u32 no_regions;
                struct {
                        u32 start_addr;
                        u32 end_addr;
                        u32 region_id;
                        u32 read_mask;
                        u32 write_mask;
                        u32 client_mask;
                        u32 shadow_mem_start_addr;
                } region_props[FLCN_ACR_MAX_REGIONS];
        } regions;
        u32 ucode_blob_size;
        u64 ucode_blob_base __aligned(8);
        struct {
                u32 vpr_enabled;
                u32 vpr_start;
                u32 vpr_end;
                u32 hdcp_policies;
        } vpr_desc;
};

void
acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
                       void *_desc)
{
        struct acr_r367_hsflcn_desc *desc = _desc;
        struct nvkm_gpuobj *ls_blob = acr->ls_blob;

        /* WPR region information if WPR is not fixed */
        if (sb->wpr_size == 0) {
                u64 wpr_start = ls_blob->addr;
                u64 wpr_end = ls_blob->addr + ls_blob->size;

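                /*
                 * With a shadow blob the WPR proper covers only the second
                 * half of the LS blob; the first half is exposed to the HS
                 * falcon as the shadow region (shadow_mem_start_addr below).
                 */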
                if (acr->func->shadow_blob)
                        wpr_start += ls_blob->size / 2;

                desc->wpr_region_id = 1;
                desc->regions.no_regions = 2;
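                /* addresses are in units of 256 bytes, hence the >> 8 */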
                desc->regions.region_props[0].start_addr = wpr_start >> 8;
                desc->regions.region_props[0].end_addr = wpr_end >> 8;
                desc->regions.region_props[0].region_id = 1;
                desc->regions.region_props[0].read_mask = 0xf;
                desc->regions.region_props[0].write_mask = 0xc;
                desc->regions.region_props[0].client_mask = 0x2;
                if (acr->func->shadow_blob)
                        desc->regions.region_props[0].shadow_mem_start_addr =
                                                             ls_blob->addr >> 8;
                else
                        desc->regions.region_props[0].shadow_mem_start_addr = 0;
        } else {
                desc->ucode_blob_base = ls_blob->addr;
                desc->ucode_blob_size = ls_blob->size;
        }
}

const struct acr_r352_func
acr_r367_func = {
        .fixup_hs_desc = acr_r367_fixup_hs_desc,
        .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
        .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
        .shadow_blob = true,
        .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
        .ls_fill_headers = acr_r367_ls_fill_headers,
        .ls_write_wpr = acr_r367_ls_write_wpr,
        .ls_func = {
                [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
                [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
                [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
                [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
        },
};

struct nvkm_acr *
acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
             unsigned long managed_falcons)
{
        return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
}