linux/drivers/gpu/drm/i915/display/intel_csr.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_csr.h"
#include "intel_de.h"

/**
 * DOC: csr support for dmc
 *
 * Display Context Save and Restore (CSR) firmware support was added from gen9
 * onwards to drive the newly added DMC (Display microcontroller) in the
 * display engine, which saves and restores the state of the display engine
 * when it enters a low-power state and comes back to normal.
 */

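/*
 * Rough load flow implemented in this file (summary added here, not taken
 * from the original comments): intel_csr_ucode_init() picks a firmware path
 * per platform, takes an init power reference and schedules
 * csr_load_work_fn(), which requests the blob, parses it via parse_csr_fw()
 * and, on success, programs the DMC with intel_csr_load_program() before
 * dropping the reference again.
 */
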
#define GEN12_CSR_MAX_FW_SIZE           ICL_CSR_MAX_FW_SIZE

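/*
 * Note (assumption, inferred from how CSR_VERSION_MAJOR()/CSR_VERSION_MINOR()
 * are used below): CSR_VERSION(major, minor) from intel_csr.h packs the major
 * number in the upper 16 bits and the minor in the lower 16 bits, so e.g.
 * CSR_VERSION(2, 2) matches a css_header->version of 0x00020002.
 */
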
#define RKL_CSR_PATH                    "i915/rkl_dmc_ver2_02.bin"
#define RKL_CSR_VERSION_REQUIRED        CSR_VERSION(2, 2)
MODULE_FIRMWARE(RKL_CSR_PATH);

#define TGL_CSR_PATH                    "i915/tgl_dmc_ver2_08.bin"
#define TGL_CSR_VERSION_REQUIRED        CSR_VERSION(2, 8)
#define TGL_CSR_MAX_FW_SIZE             0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);

#define ICL_CSR_PATH                    "i915/icl_dmc_ver1_09.bin"
#define ICL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 9)
#define ICL_CSR_MAX_FW_SIZE             0x6000
MODULE_FIRMWARE(ICL_CSR_PATH);

#define CNL_CSR_PATH                    "i915/cnl_dmc_ver1_07.bin"
#define CNL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 7)
#define CNL_CSR_MAX_FW_SIZE             GLK_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(CNL_CSR_PATH);

#define GLK_CSR_PATH                    "i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED        CSR_VERSION(1, 4)
#define GLK_CSR_MAX_FW_SIZE             0x4000
MODULE_FIRMWARE(GLK_CSR_PATH);

#define KBL_CSR_PATH                    "i915/kbl_dmc_ver1_04.bin"
#define KBL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 4)
#define KBL_CSR_MAX_FW_SIZE             BXT_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_CSR_PATH);

#define SKL_CSR_PATH                    "i915/skl_dmc_ver1_27.bin"
#define SKL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 27)
#define SKL_CSR_MAX_FW_SIZE             BXT_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_CSR_PATH);

#define BXT_CSR_PATH                    "i915/bxt_dmc_ver1_07.bin"
#define BXT_CSR_VERSION_REQUIRED        CSR_VERSION(1, 7)
#define BXT_CSR_MAX_FW_SIZE             0x3000
MODULE_FIRMWARE(BXT_CSR_PATH);

#define CSR_DEFAULT_FW_OFFSET           0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES     20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES  32
#define DMC_V1_MAX_MMIO_COUNT           8
#define DMC_V3_MAX_MMIO_COUNT           20

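/*
 * On-disk layout of the DMC firmware blob, as handled by the
 * parse_csr_fw_*() helpers below (descriptive summary added here): a CSS
 * header, followed by a package header plus an array of fw_info entries
 * that map stepping/substepping to an offset, followed (at that offset,
 * counted in dwords from the end of the package) by a DMC header and the
 * payload that gets written to the CSR_PROGRAM() registers.
 */
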
struct intel_css_header {
        /* 0x09 for DMC */
        u32 module_type;

        /* Includes the DMC specific header in dwords */
        u32 header_len;

        /* The value is always 0x10000 */
        u32 header_ver;

        /* Not used */
        u32 module_id;

        /* Not used */
        u32 module_vendor;

        /* in YYYYMMDD format */
        u32 date;

        /* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
        u32 size;

        /* Not used */
        u32 key_size;

        /* Not used */
        u32 modulus_size;

        /* Not used */
        u32 exponent_size;

        /* Not used */
        u32 reserved1[12];

        /* Major Minor */
        u32 version;

        /* Not used */
        u32 reserved2[8];

        /* Not used */
        u32 kernel_header_info;
} __packed;

struct intel_fw_info {
        u8 reserved1;

        /* reserved on package_header version 1, must be 0 on version 2 */
        u8 dmc_id;

        /* Stepping (A, B, C, ..., *). * is a wildcard */
        char stepping;

        /* Sub-stepping (0, 1, ..., *). * is a wildcard */
        char substepping;

        u32 offset;
        u32 reserved2;
} __packed;

struct intel_package_header {
        /* DMC container header length in dwords */
        u8 header_len;

        /* 0x01, 0x02 */
        u8 header_ver;

        u8 reserved[10];

        /* Number of valid entries in the FWInfo array below */
        u32 num_entries;
} __packed;

struct intel_dmc_header_base {
        /* The value is always 0x40403E3E */
        u32 signature;

        /* DMC binary header length */
        u8 header_len;

        /* 0x01 */
        u8 header_ver;

        /* Reserved */
        u16 dmcc_ver;

        /* Major, Minor */
        u32 project;

        /* Firmware program size (excluding header) in dwords */
        u32 fw_size;

        /* Major Minor version */
        u32 fw_version;
} __packed;

struct intel_dmc_header_v1 {
        struct intel_dmc_header_base base;

        /* Number of valid MMIO cycles present. */
        u32 mmio_count;

        /* MMIO address */
        u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];

        /* MMIO data */
        u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];

        /* FW filename */
        char dfile[32];

        u32 reserved1[2];
} __packed;

struct intel_dmc_header_v3 {
        struct intel_dmc_header_base base;

        /* DMC RAM start MMIO address */
        u32 start_mmioaddr;

        u32 reserved[9];

        /* FW filename */
        char dfile[32];

        /* Number of valid MMIO cycles present. */
        u32 mmio_count;

        /* MMIO address */
        u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];

        /* MMIO data */
        u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
} __packed;

struct stepping_info {
        char stepping;
        char substepping;
};

static const struct stepping_info skl_stepping_info[] = {
        {'A', '0'}, {'B', '0'}, {'C', '0'},
        {'D', '0'}, {'E', '0'}, {'F', '0'},
        {'G', '0'}, {'H', '0'}, {'I', '0'},
        {'J', '0'}, {'K', '0'}
};

static const struct stepping_info bxt_stepping_info[] = {
        {'A', '0'}, {'A', '1'}, {'A', '2'},
        {'B', '0'}, {'B', '1'}, {'B', '2'}
};

static const struct stepping_info icl_stepping_info[] = {
        {'A', '0'}, {'A', '1'}, {'A', '2'},
        {'B', '0'}, {'B', '2'},
        {'C', '0'}
};

static const struct stepping_info no_stepping_info = { '*', '*' };

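/*
 * Illustrative note (added, not from the original source): the stepping
 * tables above are indexed by PCI revision ID, so e.g. a Skylake part with
 * INTEL_REVID() == 2 resolves to stepping 'C', substepping '0'. Revisions
 * beyond the table (or platforms without a table) fall back to the wildcard
 * no_stepping_info entry.
 */
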
static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *dev_priv)
{
        const struct stepping_info *si;
        unsigned int size;

        if (IS_ICELAKE(dev_priv)) {
                size = ARRAY_SIZE(icl_stepping_info);
                si = icl_stepping_info;
        } else if (IS_SKYLAKE(dev_priv)) {
                size = ARRAY_SIZE(skl_stepping_info);
                si = skl_stepping_info;
        } else if (IS_BROXTON(dev_priv)) {
                size = ARRAY_SIZE(bxt_stepping_info);
                si = bxt_stepping_info;
        } else {
                size = 0;
                si = NULL;
        }

        if (INTEL_REVID(dev_priv) < size)
                return si + INTEL_REVID(dev_priv);

        return &no_stepping_info;
}

static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
        u32 val, mask;

        mask = DC_STATE_DEBUG_MASK_MEMORY_UP;

        if (IS_GEN9_LP(dev_priv))
                mask |= DC_STATE_DEBUG_MASK_CORES;

        /* The below bit doesn't need to be cleared ever afterwards */
        val = intel_de_read(dev_priv, DC_STATE_DEBUG);
        if ((val & mask) != mask) {
                val |= mask;
                intel_de_write(dev_priv, DC_STATE_DEBUG, val);
                intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
        }
}

/**
 * intel_csr_load_program() - write the firmware from memory to registers.
 * @dev_priv: i915 drm device.
 *
 * CSR firmware is read from a .bin file and kept in internal memory one time.
 * Every time the display comes back from a low power state this function is
 * called to copy the firmware from internal memory to registers.
 */
void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
        u32 *payload = dev_priv->csr.dmc_payload;
        u32 i, fw_size;

        if (!HAS_CSR(dev_priv)) {
                drm_err(&dev_priv->drm,
                        "No CSR support available for this platform\n");
                return;
        }

        if (!dev_priv->csr.dmc_payload) {
                drm_err(&dev_priv->drm,
                        "Tried to program CSR with empty payload\n");
                return;
        }

        fw_size = dev_priv->csr.dmc_fw_size;
        assert_rpm_wakelock_held(&dev_priv->runtime_pm);

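        /*
         * Presumably (the original code gives no rationale here) preemption
         * is disabled so the payload is written out in one tight,
         * uninterrupted burst of register writes.
         */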
        preempt_disable();

        for (i = 0; i < fw_size; i++)
                intel_uncore_write_fw(&dev_priv->uncore, CSR_PROGRAM(i),
                                      payload[i]);

        preempt_enable();

        for (i = 0; i < dev_priv->csr.mmio_count; i++) {
                intel_de_write(dev_priv, dev_priv->csr.mmioaddr[i],
                               dev_priv->csr.mmiodata[i]);
        }

        dev_priv->csr.dc_state = 0;

        gen9_set_dc_state_debugmask(dev_priv);
}

/*
 * Search fw_info table for dmc_offset to find firmware binary: num_entries is
 * already sanitized.
 */
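/*
 * Matching precedence, by example (illustrative note added here): for
 * stepping info {'B', '0'}, an entry {'B', '*'} or an exact {'B', '0'}
 * entry terminates the search immediately, while a fully generic
 * {'*', '*'} entry is only remembered as a fallback in case no more
 * specific entry follows.
 */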
static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
                              unsigned int num_entries,
                              const struct stepping_info *si,
                              u8 package_ver)
{
        u32 dmc_offset = CSR_DEFAULT_FW_OFFSET;
        unsigned int i;

        for (i = 0; i < num_entries; i++) {
                if (package_ver > 1 && fw_info[i].dmc_id != 0)
                        continue;

                if (fw_info[i].substepping == '*' &&
                    si->stepping == fw_info[i].stepping) {
                        dmc_offset = fw_info[i].offset;
                        break;
                }

                if (si->stepping == fw_info[i].stepping &&
                    si->substepping == fw_info[i].substepping) {
                        dmc_offset = fw_info[i].offset;
                        break;
                }

                if (fw_info[i].stepping == '*' &&
                    fw_info[i].substepping == '*') {
                        /*
                         * In theory we should stop the search as generic
                         * entries should always come after the more specific
                         * ones, but let's continue to make sure to work even
                         * with "broken" firmwares. If we don't find a more
                         * specific one, then we use this entry
                         */
                        dmc_offset = fw_info[i].offset;
                }
        }

        return dmc_offset;
}

static u32 parse_csr_fw_dmc(struct intel_csr *csr,
                            const struct intel_dmc_header_base *dmc_header,
                            size_t rem_size)
{
        unsigned int header_len_bytes, dmc_header_size, payload_size, i;
        const u32 *mmioaddr, *mmiodata;
        u32 mmio_count, mmio_count_max;
        u8 *payload;

        BUILD_BUG_ON(ARRAY_SIZE(csr->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
                     ARRAY_SIZE(csr->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

        /*
         * Check if we can access common fields; we will check again below
         * after we have read the version
         */
        if (rem_size < sizeof(struct intel_dmc_header_base))
                goto error_truncated;

        /* Cope with small differences between v1 and v3 */
        if (dmc_header->header_ver == 3) {
                const struct intel_dmc_header_v3 *v3 =
                        (const struct intel_dmc_header_v3 *)dmc_header;

                if (rem_size < sizeof(struct intel_dmc_header_v3))
                        goto error_truncated;

                mmioaddr = v3->mmioaddr;
                mmiodata = v3->mmiodata;
                mmio_count = v3->mmio_count;
                mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
                /* header_len is in dwords */
                header_len_bytes = dmc_header->header_len * 4;
                dmc_header_size = sizeof(*v3);
        } else if (dmc_header->header_ver == 1) {
                const struct intel_dmc_header_v1 *v1 =
                        (const struct intel_dmc_header_v1 *)dmc_header;

                if (rem_size < sizeof(struct intel_dmc_header_v1))
                        goto error_truncated;

                mmioaddr = v1->mmioaddr;
                mmiodata = v1->mmiodata;
                mmio_count = v1->mmio_count;
                mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
                header_len_bytes = dmc_header->header_len;
                dmc_header_size = sizeof(*v1);
        } else {
                DRM_ERROR("Unknown DMC fw header version: %u\n",
                          dmc_header->header_ver);
                return 0;
        }

        if (header_len_bytes != dmc_header_size) {
                DRM_ERROR("DMC firmware has wrong dmc header length "
                          "(%u bytes)\n", header_len_bytes);
                return 0;
        }

        /* Cache the dmc header info. */
        if (mmio_count > mmio_count_max) {
                DRM_ERROR("DMC firmware has wrong mmio count %u\n", mmio_count);
                return 0;
        }

        for (i = 0; i < mmio_count; i++) {
                if (mmioaddr[i] < CSR_MMIO_START_RANGE ||
                    mmioaddr[i] > CSR_MMIO_END_RANGE) {
                        DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
                                  mmioaddr[i]);
                        return 0;
                }
                csr->mmioaddr[i] = _MMIO(mmioaddr[i]);
                csr->mmiodata[i] = mmiodata[i];
        }
        csr->mmio_count = mmio_count;

        rem_size -= header_len_bytes;

        /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
        payload_size = dmc_header->fw_size * 4;
        if (rem_size < payload_size)
                goto error_truncated;

        if (payload_size > csr->max_fw_size) {
                DRM_ERROR("DMC FW too big (%u bytes)\n", payload_size);
                return 0;
        }
        csr->dmc_fw_size = dmc_header->fw_size;

        csr->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
        if (!csr->dmc_payload) {
                DRM_ERROR("Memory allocation failed for dmc payload\n");
                return 0;
        }

        payload = (u8 *)(dmc_header) + header_len_bytes;
        memcpy(csr->dmc_payload, payload, payload_size);

        return header_len_bytes + payload_size;

error_truncated:
        DRM_ERROR("Truncated DMC firmware, refusing.\n");
        return 0;
}

static u32
parse_csr_fw_package(struct intel_csr *csr,
                     const struct intel_package_header *package_header,
                     const struct stepping_info *si,
                     size_t rem_size)
{
        u32 package_size = sizeof(struct intel_package_header);
        u32 num_entries, max_entries, dmc_offset;
        const struct intel_fw_info *fw_info;

        if (rem_size < package_size)
                goto error_truncated;

        if (package_header->header_ver == 1) {
                max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
        } else if (package_header->header_ver == 2) {
                max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
        } else {
                DRM_ERROR("DMC firmware has unknown header version %u\n",
                          package_header->header_ver);
                return 0;
        }

        /*
         * We should always have space for max_entries,
         * even if not all are used
         */
        package_size += max_entries * sizeof(struct intel_fw_info);
        if (rem_size < package_size)
                goto error_truncated;

        if (package_header->header_len * 4 != package_size) {
                DRM_ERROR("DMC firmware has wrong package header length "
                          "(%u bytes)\n", package_size);
                return 0;
        }

        num_entries = package_header->num_entries;
        if (WARN_ON(package_header->num_entries > max_entries))
                num_entries = max_entries;

        fw_info = (const struct intel_fw_info *)
                ((u8 *)package_header + sizeof(*package_header));
        dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si,
                                        package_header->header_ver);
        if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
                DRM_ERROR("DMC firmware not supported for %c stepping\n",
                          si->stepping);
                return 0;
        }

        /* dmc_offset is in dwords */
        return package_size + dmc_offset * 4;

error_truncated:
        DRM_ERROR("Truncated DMC firmware, refusing.\n");
        return 0;
}

/* Return number of bytes parsed or 0 on error */
static u32 parse_csr_fw_css(struct intel_csr *csr,
                            struct intel_css_header *css_header,
                            size_t rem_size)
{
        if (rem_size < sizeof(struct intel_css_header)) {
                DRM_ERROR("Truncated DMC firmware, refusing.\n");
                return 0;
        }

        if (sizeof(struct intel_css_header) !=
            (css_header->header_len * 4)) {
                DRM_ERROR("DMC firmware has wrong CSS header length "
                          "(%u bytes)\n",
                          (css_header->header_len * 4));
                return 0;
        }

        if (csr->required_version &&
            css_header->version != csr->required_version) {
                DRM_INFO("Refusing to load DMC firmware v%u.%u,"
                         " please use v%u.%u\n",
                         CSR_VERSION_MAJOR(css_header->version),
                         CSR_VERSION_MINOR(css_header->version),
                         CSR_VERSION_MAJOR(csr->required_version),
                         CSR_VERSION_MINOR(csr->required_version));
                return 0;
        }

        csr->version = css_header->version;

        return sizeof(struct intel_css_header);
}

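/*
 * Descriptive note (added, not in the original): each parse_csr_fw_*()
 * helper returns the number of bytes it consumed (or 0 on error), and
 * readcount below accumulates those so every stage starts at the offset
 * where the previous one ended.
 */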
static void parse_csr_fw(struct drm_i915_private *dev_priv,
                         const struct firmware *fw)
{
        struct intel_css_header *css_header;
        struct intel_package_header *package_header;
        struct intel_dmc_header_base *dmc_header;
        struct intel_csr *csr = &dev_priv->csr;
        const struct stepping_info *si = intel_get_stepping_info(dev_priv);
        u32 readcount = 0;
        u32 r;

        if (!fw)
                return;

        /* Extract CSS Header information */
        css_header = (struct intel_css_header *)fw->data;
        r = parse_csr_fw_css(csr, css_header, fw->size);
        if (!r)
                return;

        readcount += r;

        /* Extract Package Header information */
        package_header = (struct intel_package_header *)&fw->data[readcount];
        r = parse_csr_fw_package(csr, package_header, si, fw->size - readcount);
        if (!r)
                return;

        readcount += r;

        /* Extract dmc_header information */
        dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount];
        parse_csr_fw_dmc(csr, dmc_header, fw->size - readcount);
}

static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
{
        drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
        dev_priv->csr.wakeref =
                intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}

static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
{
        intel_wakeref_t wakeref __maybe_unused =
                fetch_and_zero(&dev_priv->csr.wakeref);

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

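/*
 * Added note: the firmware request runs from a work item (scheduled by
 * intel_csr_ucode_init() below), presumably so that driver init does not
 * block waiting for userspace to supply the blob.
 */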
static void csr_load_work_fn(struct work_struct *work)
{
        struct drm_i915_private *dev_priv;
        struct intel_csr *csr;
        const struct firmware *fw = NULL;

        dev_priv = container_of(work, typeof(*dev_priv), csr.work);
        csr = &dev_priv->csr;

        request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
        parse_csr_fw(dev_priv, fw);

        if (dev_priv->csr.dmc_payload) {
                intel_csr_load_program(dev_priv);
                intel_csr_runtime_pm_put(dev_priv);

                drm_info(&dev_priv->drm,
                         "Finished loading DMC firmware %s (v%u.%u)\n",
                         dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version),
                         CSR_VERSION_MINOR(csr->version));
        } else {
                drm_notice(&dev_priv->drm,
                           "Failed to load DMC firmware %s."
                           " Disabling runtime power management.\n",
                           csr->fw_path);
                drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
                           INTEL_UC_FIRMWARE_URL);
        }

        release_firmware(fw);
}

/**
 * intel_csr_ucode_init() - initialize the firmware loading.
 * @dev_priv: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * firmware from a .bin file and copy it into internal memory.
 */
void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
{
        struct intel_csr *csr = &dev_priv->csr;

        INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);

        if (!HAS_CSR(dev_priv))
                return;

        /*
         * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
         * runtime-suspend.
         *
         * On error, we return with the rpm wakeref held to prevent runtime
         * suspend as runtime suspend *requires* a working CSR for whatever
         * reason.
         */
        intel_csr_runtime_pm_get(dev_priv);

        if (IS_ROCKETLAKE(dev_priv)) {
                csr->fw_path = RKL_CSR_PATH;
                csr->required_version = RKL_CSR_VERSION_REQUIRED;
                csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
        } else if (INTEL_GEN(dev_priv) >= 12) {
                csr->fw_path = TGL_CSR_PATH;
                csr->required_version = TGL_CSR_VERSION_REQUIRED;
                /* Allow loading fw via parameter using the last known size */
                csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
        } else if (IS_GEN(dev_priv, 11)) {
                csr->fw_path = ICL_CSR_PATH;
                csr->required_version = ICL_CSR_VERSION_REQUIRED;
                csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
        } else if (IS_CANNONLAKE(dev_priv)) {
                csr->fw_path = CNL_CSR_PATH;
                csr->required_version = CNL_CSR_VERSION_REQUIRED;
                csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
        } else if (IS_GEMINILAKE(dev_priv)) {
                csr->fw_path = GLK_CSR_PATH;
                csr->required_version = GLK_CSR_VERSION_REQUIRED;
                csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
        } else if (IS_KABYLAKE(dev_priv) ||
                   IS_COFFEELAKE(dev_priv) ||
                   IS_COMETLAKE(dev_priv)) {
                csr->fw_path = KBL_CSR_PATH;
                csr->required_version = KBL_CSR_VERSION_REQUIRED;
                csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
        } else if (IS_SKYLAKE(dev_priv)) {
                csr->fw_path = SKL_CSR_PATH;
                csr->required_version = SKL_CSR_VERSION_REQUIRED;
                csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
        } else if (IS_BROXTON(dev_priv)) {
                csr->fw_path = BXT_CSR_PATH;
                csr->required_version = BXT_CSR_VERSION_REQUIRED;
                csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
        }

        if (dev_priv->params.dmc_firmware_path) {
                if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
                        csr->fw_path = NULL;
                        drm_info(&dev_priv->drm,
                                 "Disabling CSR firmware and runtime PM\n");
                        return;
                }

                csr->fw_path = dev_priv->params.dmc_firmware_path;
                /* Bypass version check for firmware override. */
                csr->required_version = 0;
        }

        if (csr->fw_path == NULL) {
                drm_dbg_kms(&dev_priv->drm,
                            "No known CSR firmware for platform, disabling runtime PM\n");
                return;
        }

        drm_dbg_kms(&dev_priv->drm, "Loading %s\n", csr->fw_path);
        schedule_work(&dev_priv->csr.work);
}

/**
 * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
 * @dev_priv: i915 drm device
 *
 * Prepare the DMC firmware before entering system suspend. This includes
 * flushing pending work items and releasing any resources acquired during
 * init.
 */
void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
{
        if (!HAS_CSR(dev_priv))
                return;

        flush_work(&dev_priv->csr.work);

        /* Drop the reference held in case DMC isn't loaded. */
        if (!dev_priv->csr.dmc_payload)
                intel_csr_runtime_pm_put(dev_priv);
}

/**
 * intel_csr_ucode_resume() - init CSR firmware during system resume
 * @dev_priv: i915 drm device
 *
 * Reinitialize the DMC firmware during system resume, reacquiring any
 * resources released in intel_csr_ucode_suspend().
 */
void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
{
        if (!HAS_CSR(dev_priv))
                return;

        /*
         * Reacquire the reference to keep RPM disabled in case DMC isn't
         * loaded.
         */
        if (!dev_priv->csr.dmc_payload)
                intel_csr_runtime_pm_get(dev_priv);
}

/**
 * intel_csr_ucode_fini() - unload the CSR firmware.
 * @dev_priv: i915 drm device.
 *
 * Firmware unloading includes freeing the internal memory and resetting the
 * firmware loading status.
 */
void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
{
        if (!HAS_CSR(dev_priv))
                return;

        intel_csr_ucode_suspend(dev_priv);
        drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);

        kfree(dev_priv->csr.dmc_payload);
}
 806