linux/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 *
 * Authors:
 *    Vinit Azad <vinit.azad@intel.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Dave Gordon <david.s.gordon@intel.com>
 *    Alex Dai <yu.dai@intel.com>
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "intel_guc_fw.h"
#include "i915_drv.h"

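/*
 * Program the SHIM control flags, enable doorbells via the GT PM config
 * register and, on gen9, enable DOP clock gating for the GuC clocks and set
 * the ARAT C6 disable window, all before the firmware image is transferred
 * by DMA.
 */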
static void guc_prepare_xfer(struct intel_uncore *uncore)
{
        u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
                         GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
                         GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
                         GUC_ENABLE_MIA_CLOCK_GATING;

        if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
                shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
                              GUC_ENABLE_MIA_CACHING;

        /* Must program this register before loading the ucode with DMA */
        intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);

        if (IS_GEN9_LP(uncore->i915))
                intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
        else
                intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

        if (GRAPHICS_VER(uncore->i915) == 9) {
                /* DOP Clock Gating Enable for GuC clocks */
                intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
                                 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);

                /* allows for 5us (in 10ns units) before GT can go to RC6 */
                intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
        }
}

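/*
 * Copy the RSA signature out of the firmware image and write it into the
 * UOS_RSA_SCRATCH registers, for platforms where the signature is small
 * enough for the HW to read it directly from MMIO.
 */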
static int guc_xfer_rsa_mmio(struct intel_uc_fw *guc_fw,
                             struct intel_uncore *uncore)
{
        u32 rsa[UOS_RSA_SCRATCH_COUNT];
        size_t copied;
        int i;

        copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
        if (copied < sizeof(rsa))
                return -ENOMEM;

        for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
                intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);

        return 0;
}

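/*
 * The RSA signature is too large for the scratch registers, so instead hand
 * the HW the GGTT offset of the ggtt-pinned copy of the signature.
 */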
static int guc_xfer_rsa_vma(struct intel_uc_fw *guc_fw,
                            struct intel_uncore *uncore)
{
        struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);

        intel_uncore_write(uncore, UOS_RSA_SCRATCH(0),
                           intel_guc_ggtt_offset(guc, guc_fw->rsa_data));

        return 0;
}

/* Copy RSA signature from the fw image to HW for verification */
static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
                        struct intel_uncore *uncore)
{
        if (guc_fw->rsa_data)
                return guc_xfer_rsa_vma(guc_fw, uncore);
        else
                return guc_xfer_rsa_mmio(guc_fw, uncore);
}

/*
 * Read the GuC status register (GUC_STATUS) and store it in the
 * specified location; then return a boolean indicating whether the
 * uKernel status field reports that the GuC boot process has
 * completed (INTEL_GUC_LOAD_STATUS_READY).
 *
 * This is used for polling the GuC status in a wait_for()
 * loop below.
 */
static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
{
        u32 val = intel_uncore_read(uncore, GUC_STATUS);
        u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, val);

        *status = val;
        return uk_val == INTEL_GUC_LOAD_STATUS_READY;
}

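/*
 * Poll GUC_STATUS until the uKernel reports READY, or time out. On failure
 * the status register is decoded into the log to help diagnose whether the
 * load stalled, the RSA check failed or the firmware hit an exception.
 */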
static int guc_wait_ucode(struct intel_uncore *uncore)
{
        u32 status;
        int ret;

        /*
         * Wait for the GuC to start up.
         * NB: Docs recommend not using the interrupt for completion.
         * Measurements indicate this should take no more than 20ms
         * (assuming the GT clock is at maximum frequency). So, a
         * timeout here indicates that the GuC has failed and is unusable.
         * (Higher levels of the driver may decide to reset the GuC and
         * attempt the ucode load again if this happens.)
         *
         * FIXME: There is a known (but exceedingly unlikely) race condition
         * where the asynchronous frequency management code could reduce
         * the GT clock while a GuC reload is in progress (during a full
         * GT reset). A fix is in progress but there are complex locking
         * issues to be resolved. In the meantime bump the timeout to
         * 200ms. Even at slowest clock, this should be sufficient. And
         * in the working case, a larger timeout makes no difference.
         */
        ret = wait_for(guc_ready(uncore, &status), 200);
        if (ret) {
                struct drm_device *drm = &uncore->i915->drm;

                drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
                drm_info(drm, "GuC load failed: status: Reset = %d, "
                        "BootROM = 0x%02X, UKernel = 0x%02X, "
                        "MIA = 0x%02X, Auth = 0x%02X\n",
                        REG_FIELD_GET(GS_MIA_IN_RESET, status),
                        REG_FIELD_GET(GS_BOOTROM_MASK, status),
                        REG_FIELD_GET(GS_UKERNEL_MASK, status),
                        REG_FIELD_GET(GS_MIA_MASK, status),
                        REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));

                if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
                        drm_info(drm, "GuC firmware signature verification failed\n");
                        ret = -ENOEXEC;
                }

                if (REG_FIELD_GET(GS_UKERNEL_MASK, status) == INTEL_GUC_LOAD_STATUS_EXCEPTION) {
                        drm_info(drm, "GuC firmware exception. EIP: %#x\n",
                                 intel_uncore_read(uncore, SOFT_SCRATCH(13)));
                        ret = -ENXIO;
                }
        }

        return ret;
}

/**
 * intel_guc_fw_upload() - load GuC uCode to device
 * @guc: intel_guc structure
 *
 * Called from intel_uc_init_hw() during driver load, on resume from sleep,
 * and after a GPU reset.
 *
 * The firmware image should have already been fetched into memory, so only
 * check that fetch succeeded, and then transfer the image to the h/w.
 *
 * Return: non-zero code on error
 */
int intel_guc_fw_upload(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct intel_uncore *uncore = gt->uncore;
        int ret;

        guc_prepare_xfer(uncore);

        /*
         * Note that GuC needs the CSS header plus uKernel code to be copied
         * by the DMA engine in one operation, whereas the RSA signature is
         * loaded separately, either by copying it to the UOS_RSA_SCRATCH
         * registers (if key size <= 256 bytes) or through a ggtt-pinned vma
         * (if key size > 256 bytes). The RSA size and therefore the way we
         * provide it to the HW is fixed for each platform and hard-coded in
         * the bootrom.
         */
        ret = guc_xfer_rsa(&guc->fw, uncore);
        if (ret)
                goto out;

        /*
         * Current uCode expects the code to be loaded at 8k; locations below
         * this are used for the stack.
         */
        ret = intel_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
        if (ret)
                goto out;

        ret = guc_wait_ucode(uncore);
        if (ret)
                goto out;

        intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_RUNNING);
        return 0;

out:
        intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
        return ret;
}