linux/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r367.h"
#include "acr_r361.h"
#include "acr_r370.h"

#include <core/gpuobj.h>

/*
 * r367 ACR: the new LS signature format requires a rewrite of the LS firmware
 * and blob creation functions. The hsflcn_desc layout has also changed
 * slightly.
 */

#define LSF_LSB_DEPMAP_SIZE 11

/**
 * struct acr_r367_lsf_lsb_header - LS firmware header
 *
 * See also struct acr_r352_lsf_lsb_header for documentation.
 */
struct acr_r367_lsf_lsb_header {
        /**
         * LS falcon signatures
         * @prd_keys:           signature to use in production mode
         * @dbg_keys:           signature to use in debug mode
         * @b_prd_present:      whether the production key is present
         * @b_dbg_present:      whether the debug key is present
         * @falcon_id:          ID of the falcon the ucode applies to
         */
        struct {
                u8 prd_keys[2][16];
                u8 dbg_keys[2][16];
                u32 b_prd_present;
                u32 b_dbg_present;
                u32 falcon_id;
                u32 supports_versioning;
                u32 version;
                u32 depmap_count;
                u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
                u8 kdf[16];
        } signature;
        u32 ucode_off;
        u32 ucode_size;
        u32 data_size;
        u32 bl_code_size;
        u32 bl_imem_off;
        u32 bl_data_off;
        u32 bl_data_size;
        u32 app_code_off;
        u32 app_code_size;
        u32 app_data_off;
        u32 app_data_size;
        u32 flags;
};

/**
 * struct acr_r367_lsf_wpr_header - LS blob WPR Header
 *
 * See also struct acr_r352_lsf_wpr_header for documentation.
 */
struct acr_r367_lsf_wpr_header {
        u32 falcon_id;
        u32 lsb_offset;
        u32 bootstrap_owner;
        u32 lazy_bootstrap;
        u32 bin_version;
        u32 status;
#define LSF_IMAGE_STATUS_NONE                           0
#define LSF_IMAGE_STATUS_COPY                           1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED         2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED         3
#define LSF_IMAGE_STATUS_VALIDATION_DONE                4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED             5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY                6
#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED        7
};

/**
 * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
 */
struct ls_ucode_img_r367 {
        struct ls_ucode_img base;

        const struct acr_r352_lsf_func *func;

        struct acr_r367_lsf_wpr_header wpr_header;
        struct acr_r367_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)

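/**
 * acr_r367_ls_ucode_img_load() - load and prepare a LS ucode image
 * @acr: ACR instance the image is loaded for
 * @sb: secure boot instance providing the firmware
 * @falcon_id: ID of the LS falcon to load ucode for
 *
 * Load the LS firmware for @falcon_id using its acr_r352_ls_func, check that
 * the signature has the size expected by the r367 LSB header, and copy the
 * signature into the image's LSB header.
 *
 * Return: the loaded image, or an error pointer on failure.
 */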
struct ls_ucode_img *
acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
                           const struct nvkm_secboot *sb,
                           enum nvkm_secboot_falcon falcon_id)
{
        const struct nvkm_subdev *subdev = acr->base.subdev;
        const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
        struct ls_ucode_img_r367 *img;
        int ret;

        img = kzalloc(sizeof(*img), GFP_KERNEL);
        if (!img)
                return ERR_PTR(-ENOMEM);

        img->base.falcon_id = falcon_id;

        ret = func->load(sb, func->version_max, &img->base);
        if (ret < 0) {
                kfree(img->base.ucode_data);
                kfree(img->base.sig);
                kfree(img);
                return ERR_PTR(ret);
        }

        img->func = func->version[ret];

        /* Check that the signature size matches our expectations... */
        if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
                nvkm_error(subdev, "invalid signature size for %s falcon!\n",
                           nvkm_secboot_falcon_name[falcon_id]);
                kfree(img->base.ucode_data);
                kfree(img->base.sig);
                kfree(img);
                return ERR_PTR(-EINVAL);
        }

        /* Copy signature to the right place */
        memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);

        /* not needed? the signature should already have the right value */
        img->lsb_header.signature.falcon_id = falcon_id;

        return &img->base;
}

#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096

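/**
 * acr_r367_ls_img_fill_headers() - fill the WPR and LSB headers of an image
 * @acr: ACR instance the image belongs to
 * @img: image to generate the headers for
 * @offset: offset in the WPR region at which the image will be placed
 *
 * Fill the r367 WPR and LSB headers of @img, aligning and accounting for the
 * LSB header, ucode and BL data as they will be laid out in the WPR region.
 *
 * Return: offset at which the next image can be placed.
 */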
static u32
acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
                             struct ls_ucode_img_r367 *img, u32 offset)
{
        struct ls_ucode_img *_img = &img->base;
        struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
        struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
        struct ls_ucode_img_desc *desc = &_img->ucode_desc;
        const struct acr_r352_lsf_func *func = img->func;

        /* Fill WPR header */
        whdr->falcon_id = _img->falcon_id;
        whdr->bootstrap_owner = acr->base.boot_falcon;
        whdr->bin_version = lhdr->signature.version;
        whdr->status = LSF_IMAGE_STATUS_COPY;

        /* Skip bootstrapping falcons started by something other than the ACR */
        if (acr->lazy_bootstrap & BIT(_img->falcon_id))
                whdr->lazy_bootstrap = 1;

        /* Align, save off, and include the LSB header size */
        offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
        whdr->lsb_offset = offset;
        offset += sizeof(*lhdr);

        /*
         * Align, save off, and include the original (static) ucode
         * image size
         */
        offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
        _img->ucode_off = lhdr->ucode_off = offset;
        offset += _img->ucode_size;

        /*
         * For falcons that use a boot loader (BL), we append a loader
         * desc structure on the end of the ucode image and consider
         * this the boot loader data. The host will then copy the loader
         * desc args to this space within the WPR region (before locking
         * down) and the HS bin will then copy them to DMEM 0 for the
         * loader.
         */
        lhdr->bl_code_size = ALIGN(desc->bootloader_size,
                                   LSF_BL_CODE_SIZE_ALIGN);
        lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
                                 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
        lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
                                lhdr->bl_code_size - lhdr->ucode_size;
        /*
         * Though the BL is located at the 0th offset of the image, the VA
         * is different to make sure that it doesn't collide with the actual
         * OS VA range.
         */
        lhdr->bl_imem_off = desc->bootloader_imem_offset;
        lhdr->app_code_off = desc->app_start_offset +
                             desc->app_resident_code_offset;
        lhdr->app_code_size = desc->app_resident_code_size;
        lhdr->app_data_off = desc->app_start_offset +
                             desc->app_resident_data_offset;
        lhdr->app_data_size = desc->app_resident_data_size;

        lhdr->flags = func->lhdr_flags;
        if (_img->falcon_id == acr->base.boot_falcon)
                lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;

        /* Align and save off BL descriptor size */
        lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

        /*
         * Align, save off, and include the additional BL data
         */
        offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
        lhdr->bl_data_off = offset;
        offset += lhdr->bl_data_size;

        return offset;
}

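/**
 * acr_r367_ls_fill_headers() - fill the headers of all managed LS images
 * @acr: ACR instance
 * @imgs: list of managed LS ucode images
 *
 * Reserve room for the array of WPR headers (plus one terminator entry) at
 * the base of the WPR region, then fill the headers of every managed image
 * in turn.
 *
 * Return: offset past the last image, i.e. the size required for the LS blob.
 */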
int
acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
        struct ls_ucode_img_r367 *img;
        struct list_head *l;
        u32 count = 0;
        u32 offset;

        /* Count the number of images to manage */
        list_for_each(l, imgs)
                count++;

        /*
         * Start with an array of WPR headers at the base of the WPR.
         * The expectation here is that the secure falcon will do a single DMA
         * read of this array and cache it internally so it's ok to pack these.
         * Also, we add 1 to the falcon count to indicate the end of the array.
         */
        offset = sizeof(img->wpr_header) * (count + 1);

        /*
         * Walk the managed falcons, accounting for the LSB structs
         * as well as the ucode images.
         */
        list_for_each_entry(img, imgs, base.node) {
                offset = acr_r367_ls_img_fill_headers(acr, img, offset);
        }

        return offset;
}

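/**
 * acr_r367_ls_write_wpr() - write the managed LS images into the WPR blob
 * @acr: ACR instance
 * @imgs: list of managed LS ucode images
 * @wpr_blob: GPU object backing the WPR region
 * @wpr_addr: address at which the WPR region will be mapped
 *
 * For each managed image, write its WPR header, LSB header, generated BL
 * descriptor and ucode into @wpr_blob, and terminate the WPR header array
 * with NVKM_SECBOOT_FALCON_INVALID.
 */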
int
acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
                      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
        struct ls_ucode_img *_img;
        u32 pos = 0;
        u32 max_desc_size = 0;
        u8 *gdesc;

        list_for_each_entry(_img, imgs, node) {
                struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
                const struct acr_r352_lsf_func *ls_func = img->func;

                max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
        }

        gdesc = kmalloc(max_desc_size, GFP_KERNEL);
        if (!gdesc)
                return -ENOMEM;

        nvkm_kmap(wpr_blob);

        list_for_each_entry(_img, imgs, node) {
                struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
                const struct acr_r352_lsf_func *ls_func = img->func;

                nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
                                      sizeof(img->wpr_header));

                nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
                                      &img->lsb_header, sizeof(img->lsb_header));

                /* Generate and write BL descriptor */
                memset(gdesc, 0, ls_func->bl_desc_size);
                ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);

                nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
                                      gdesc, ls_func->bl_desc_size);

                /* Copy ucode */
                nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
                                      _img->ucode_data, _img->ucode_size);

                pos += sizeof(img->wpr_header);
        }

        nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);

        nvkm_done(wpr_blob);

        kfree(gdesc);

        return 0;
}

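/**
 * struct acr_r367_hsflcn_desc - data section of the HS firmware
 *
 * r367 layout of the descriptor written into the HS firmware data section,
 * filled in by acr_r367_fixup_hs_desc().
 */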
struct acr_r367_hsflcn_desc {
        u8 reserved_dmem[0x200];
        u32 signatures[4];
        u32 wpr_region_id;
        u32 wpr_offset;
        u32 mmu_memory_range;
#define FLCN_ACR_MAX_REGIONS 2
        struct {
                u32 no_regions;
                struct {
                        u32 start_addr;
                        u32 end_addr;
                        u32 region_id;
                        u32 read_mask;
                        u32 write_mask;
                        u32 client_mask;
                        u32 shadow_mem_start_addr;
                } region_props[FLCN_ACR_MAX_REGIONS];
        } regions;
        u32 ucode_blob_size;
        u64 ucode_blob_base __aligned(8);
        struct {
                u32 vpr_enabled;
                u32 vpr_start;
                u32 vpr_end;
                u32 hdcp_policies;
        } vpr_desc;
};

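/**
 * acr_r367_fixup_hs_desc() - fill the HS descriptor with WPR information
 * @acr: ACR instance
 * @sb: secure boot instance
 * @_desc: HS descriptor to fix up, laid out as struct acr_r367_hsflcn_desc
 *
 * If no fixed WPR region is provided (sb->wpr_size == 0), fill in the region
 * properties covering the LS blob (its upper half when a shadow blob is
 * used); otherwise only pass the LS blob base address and size.
 */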
void
acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
                       void *_desc)
{
        struct acr_r367_hsflcn_desc *desc = _desc;
        struct nvkm_gpuobj *ls_blob = acr->ls_blob;

        /* WPR region information if WPR is not fixed */
        if (sb->wpr_size == 0) {
                u64 wpr_start = ls_blob->addr;
                u64 wpr_end = ls_blob->addr + ls_blob->size;

                if (acr->func->shadow_blob)
                        wpr_start += ls_blob->size / 2;

                desc->wpr_region_id = 1;
                desc->regions.no_regions = 2;
                desc->regions.region_props[0].start_addr = wpr_start >> 8;
                desc->regions.region_props[0].end_addr = wpr_end >> 8;
                desc->regions.region_props[0].region_id = 1;
                desc->regions.region_props[0].read_mask = 0xf;
                desc->regions.region_props[0].write_mask = 0xc;
                desc->regions.region_props[0].client_mask = 0x2;
                if (acr->func->shadow_blob)
                        desc->regions.region_props[0].shadow_mem_start_addr =
                                                             ls_blob->addr >> 8;
                else
                        desc->regions.region_props[0].shadow_mem_start_addr = 0;
        } else {
                desc->ucode_blob_base = ls_blob->addr;
                desc->ucode_blob_size = ls_blob->size;
        }
}

static const struct acr_r352_ls_func
acr_r367_ls_sec2_func = {
        .load = acr_ls_ucode_load_sec2,
        .post_run = acr_ls_sec2_post_run,
        .version_max = 1,
        .version = {
                &acr_r361_ls_sec2_func_0,
                &acr_r370_ls_sec2_func_0,
        }
};

const struct acr_r352_func
acr_r367_func = {
        .fixup_hs_desc = acr_r367_fixup_hs_desc,
        .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
        .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
        .shadow_blob = true,
        .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
        .ls_fill_headers = acr_r367_ls_fill_headers,
        .ls_write_wpr = acr_r367_ls_write_wpr,
        .ls_func = {
                [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
                [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
                [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
                [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
        },
};

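/**
 * acr_r367_new() - instantiate an r367 ACR
 * @boot_falcon: falcon that will perform the secure boot
 * @managed_falcons: bitmask of falcons to be managed by the ACR
 *
 * Thin wrapper around acr_r352_new_() using the r367 function table.
 */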
struct nvkm_acr *
acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
             unsigned long managed_falcons)
{
        return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
}