linux/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
        struct cgs_device base;
        struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV                                                   \
        struct amdgpu_device *adev =                                    \
                ((struct amdgpu_cgs_device *)cgs_device)->adev

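/*
 * Allocate a buffer object for a CGS client.  The CGS memory type selects
 * the domain (visible/invisible VRAM or GTT) and placement flags, and the
 * resulting amdgpu_bo pointer is returned as an opaque cgs_handle_t.
 */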
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
                                    enum cgs_gpu_mem_type type,
                                    uint64_t size, uint64_t align,
                                    uint64_t min_offset, uint64_t max_offset,
                                    cgs_handle_t *handle)
{
        CGS_FUNC_ADEV;
        uint16_t flags = 0;
        int ret = 0;
        uint32_t domain = 0;
        struct amdgpu_bo *obj;
        struct ttm_placement placement;
        struct ttm_place place;

        if (min_offset > max_offset) {
                BUG_ON(1);
                return -EINVAL;
        }

        /* fail if the alignment is not a power of 2 */
        if (((align != 1) && (align & (align - 1)))
            || size == 0 || align == 0)
                return -EINVAL;


        switch(type) {
        case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
        case CGS_GPU_MEM_TYPE__VISIBLE_FB:
                flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                        AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                domain = AMDGPU_GEM_DOMAIN_VRAM;
                if (max_offset > adev->mc.real_vram_size)
                        return -EINVAL;
                place.fpfn = min_offset >> PAGE_SHIFT;
                place.lpfn = max_offset >> PAGE_SHIFT;
                place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
                break;
        case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
        case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
                flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                        AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                domain = AMDGPU_GEM_DOMAIN_VRAM;
                if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
                        place.fpfn =
                                max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
                        place.lpfn =
                                min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
                        place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_VRAM;
                }

                break;
        case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
                domain = AMDGPU_GEM_DOMAIN_GTT;
                place.fpfn = min_offset >> PAGE_SHIFT;
                place.lpfn = max_offset >> PAGE_SHIFT;
                place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
                break;
        case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
                flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
                domain = AMDGPU_GEM_DOMAIN_GTT;
                place.fpfn = min_offset >> PAGE_SHIFT;
                place.lpfn = max_offset >> PAGE_SHIFT;
                place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
                        TTM_PL_FLAG_UNCACHED;
                break;
        default:
                return -EINVAL;
        }


        *handle = 0;

        placement.placement = &place;
        placement.num_placement = 1;
        placement.busy_placement = &place;
        placement.num_busy_placement = 1;

        ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
                                          true, domain, flags,
                                          NULL, &placement, NULL,
                                          &obj);
        if (ret) {
                DRM_ERROR("(%d) bo create failed\n", ret);
                return ret;
        }
        *handle = (cgs_handle_t)obj;

        return ret;
}

static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

        if (obj) {
                int r = amdgpu_bo_reserve(obj, true);
                if (likely(r == 0)) {
                        amdgpu_bo_kunmap(obj);
                        amdgpu_bo_unpin(obj);
                        amdgpu_bo_unreserve(obj);
                }
                amdgpu_bo_unref(&obj);

        }
        return 0;
}

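/*
 * Pin the buffer object in its preferred domain, within the placement
 * range it was created with, and return the GPU (MC) address.
 */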
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
                                   uint64_t *mcaddr)
{
        int r;
        u64 min_offset, max_offset;
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

        WARN_ON_ONCE(obj->placement.num_placement > 1);

        min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
        max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

        r = amdgpu_bo_reserve(obj, true);
        if (unlikely(r != 0))
                return r;
        r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
                                     min_offset, max_offset, mcaddr);
        amdgpu_bo_unreserve(obj);
        return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
        int r;
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
        r = amdgpu_bo_reserve(obj, true);
        if (unlikely(r != 0))
                return r;
        r = amdgpu_bo_unpin(obj);
        amdgpu_bo_unreserve(obj);
        return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
                                   void **map)
{
        int r;
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
        r = amdgpu_bo_reserve(obj, true);
        if (unlikely(r != 0))
                return r;
        r = amdgpu_bo_kmap(obj, map);
        amdgpu_bo_unreserve(obj);
        return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
        int r;
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
        r = amdgpu_bo_reserve(obj, true);
        if (unlikely(r != 0))
                return r;
        amdgpu_bo_kunmap(obj);
        amdgpu_bo_unreserve(obj);
        return r;
}

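/* MMIO and indirect register accessors exposed to CGS clients. */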
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
        CGS_FUNC_ADEV;
        return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
                                      uint32_t value)
{
        CGS_FUNC_ADEV;
        WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
                                             enum cgs_ind_reg space,
                                             unsigned index)
{
        CGS_FUNC_ADEV;
        switch (space) {
        case CGS_IND_REG__MMIO:
                return RREG32_IDX(index);
        case CGS_IND_REG__PCIE:
                return RREG32_PCIE(index);
        case CGS_IND_REG__SMC:
                return RREG32_SMC(index);
        case CGS_IND_REG__UVD_CTX:
                return RREG32_UVD_CTX(index);
        case CGS_IND_REG__DIDT:
                return RREG32_DIDT(index);
        case CGS_IND_REG_GC_CAC:
                return RREG32_GC_CAC(index);
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return 0;
        }
        WARN(1, "Invalid indirect register space");
        return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
                                          enum cgs_ind_reg space,
                                          unsigned index, uint32_t value)
{
        CGS_FUNC_ADEV;
        switch (space) {
        case CGS_IND_REG__MMIO:
                return WREG32_IDX(index, value);
        case CGS_IND_REG__PCIE:
                return WREG32_PCIE(index, value);
        case CGS_IND_REG__SMC:
                return WREG32_SMC(index, value);
        case CGS_IND_REG__UVD_CTX:
                return WREG32_UVD_CTX(index, value);
        case CGS_IND_REG__DIDT:
                return WREG32_DIDT(index, value);
        case CGS_IND_REG_GC_CAC:
                return WREG32_GC_CAC(index, value);
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return;
        }
        WARN(1, "Invalid indirect register space");
}

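/*
 * Return the base address of a PCI resource.  Only the MMIO and doorbell
 * BARs are supported; offset + size is bounds-checked against the BAR.
 */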
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
                                       enum cgs_resource_type resource_type,
                                       uint64_t size,
                                       uint64_t offset,
                                       uint64_t *resource_base)
{
        CGS_FUNC_ADEV;

        if (resource_base == NULL)
                return -EINVAL;

        switch (resource_type) {
        case CGS_RESOURCE_TYPE_MMIO:
                if (adev->rmmio_size == 0)
                        return -ENOENT;
                if ((offset + size) > adev->rmmio_size)
                        return -EINVAL;
                *resource_base = adev->rmmio_base;
                return 0;
        case CGS_RESOURCE_TYPE_DOORBELL:
                if (adev->doorbell.size == 0)
                        return -ENOENT;
                if ((offset + size) > adev->doorbell.size)
                        return -EINVAL;
                *resource_base = adev->doorbell.base;
                return 0;
        case CGS_RESOURCE_TYPE_FB:
        case CGS_RESOURCE_TYPE_IO:
        case CGS_RESOURCE_TYPE_ROM:
        default:
                return -EINVAL;
        }
}

static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
                                                  unsigned table, uint16_t *size,
                                                  uint8_t *frev, uint8_t *crev)
{
        CGS_FUNC_ADEV;
        uint16_t data_start;

        if (amdgpu_atom_parse_data_header(
                    adev->mode_info.atom_context, table, size,
                    frev, crev, &data_start))
                return (uint8_t*)adev->mode_info.atom_context->bios +
                        data_start;

        return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
                                              uint8_t *frev, uint8_t *crev)
{
        CGS_FUNC_ADEV;

        if (amdgpu_atom_parse_cmd_header(
                    adev->mode_info.atom_context, table,
                    frev, crev))
                return 0;

        return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
                                          void *args)
{
        CGS_FUNC_ADEV;

        return amdgpu_atom_execute_table(
                adev->mode_info.atom_context, table, args);
}

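/*
 * Interrupt glue: cgs_irq_params couples a client's set/handler callbacks
 * to an amdgpu_irq_src so interrupts can be routed back through CGS.
 */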
struct cgs_irq_params {
        unsigned src_id;
        cgs_irq_source_set_func_t set;
        cgs_irq_handler_func_t handler;
        void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
                             struct amdgpu_irq_src *src,
                             unsigned type,
                             enum amdgpu_interrupt_state state)
{
        struct cgs_irq_params *irq_params =
                (struct cgs_irq_params *)src->data;
        if (!irq_params)
                return -EINVAL;
        if (!irq_params->set)
                return -EINVAL;
        return irq_params->set(irq_params->private_data,
                               irq_params->src_id,
                               type,
                               (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
                           struct amdgpu_irq_src *source,
                           struct amdgpu_iv_entry *entry)
{
        struct cgs_irq_params *irq_params =
                (struct cgs_irq_params *)source->data;
        if (!irq_params)
                return -EINVAL;
        if (!irq_params->handler)
                return -EINVAL;
        return irq_params->handler(irq_params->private_data,
                                   irq_params->src_id,
                                   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
        .set = cgs_set_irq_state,
        .process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(void *cgs_device,
                                     unsigned client_id,
                                     unsigned src_id,
                                     unsigned num_types,
                                     cgs_irq_source_set_func_t set,
                                     cgs_irq_handler_func_t handler,
                                     void *private_data)
{
        CGS_FUNC_ADEV;
        int ret = 0;
        struct cgs_irq_params *irq_params;
        struct amdgpu_irq_src *source =
                kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
        if (!source)
                return -ENOMEM;
        irq_params =
                kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
        if (!irq_params) {
                kfree(source);
                return -ENOMEM;
        }
        source->num_types = num_types;
        source->funcs = &cgs_irq_funcs;
        irq_params->src_id = src_id;
        irq_params->set = set;
        irq_params->handler = handler;
        irq_params->private_data = private_data;
        source->data = (void *)irq_params;
        ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
        if (ret) {
                kfree(irq_params);
                kfree(source);
        }

        return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
                              unsigned src_id, unsigned type)
{
        CGS_FUNC_ADEV;

        if (!adev->irq.client[client_id].sources)
                return -EINVAL;

        return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
                              unsigned src_id, unsigned type)
{
        CGS_FUNC_ADEV;

        if (!adev->irq.client[client_id].sources)
                return -EINVAL;

        return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
}

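/* Forward clockgating/powergating requests to the IP block of the given type. */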
static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
                                  enum amd_ip_block_type block_type,
                                  enum amd_clockgating_state state)
{
        CGS_FUNC_ADEV;
        int i, r = -1;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;

                if (adev->ip_blocks[i].version->type == block_type) {
                        r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
                                                                (void *)adev,
                                                                        state);
                        break;
                }
        }
        return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
                                  enum amd_ip_block_type block_type,
                                  enum amd_powergating_state state)
{
        CGS_FUNC_ADEV;
        int i, r = -1;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;

                if (adev->ip_blocks[i].version->type == block_type) {
                        r = adev->ip_blocks[i].version->funcs->set_powergating_state(
                                                                (void *)adev,
                                                                        state);
                        break;
                }
        }
        return r;
}


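/* Translate a CGS ucode ID into the corresponding AMDGPU_UCODE_ID. */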
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
        CGS_FUNC_ADEV;
        enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

        switch (fw_type) {
        case CGS_UCODE_ID_SDMA0:
                result = AMDGPU_UCODE_ID_SDMA0;
                break;
        case CGS_UCODE_ID_SDMA1:
                result = AMDGPU_UCODE_ID_SDMA1;
                break;
        case CGS_UCODE_ID_CP_CE:
                result = AMDGPU_UCODE_ID_CP_CE;
                break;
        case CGS_UCODE_ID_CP_PFP:
                result = AMDGPU_UCODE_ID_CP_PFP;
                break;
        case CGS_UCODE_ID_CP_ME:
                result = AMDGPU_UCODE_ID_CP_ME;
                break;
        case CGS_UCODE_ID_CP_MEC:
        case CGS_UCODE_ID_CP_MEC_JT1:
                result = AMDGPU_UCODE_ID_CP_MEC1;
                break;
        case CGS_UCODE_ID_CP_MEC_JT2:
                /* for VI. JT2 should be the same as JT1, because:
                 * 1. MEC2 and MEC1 use exactly the same FW.
                 * 2. JT2 is not patched but JT1 is.
                 */
                if (adev->asic_type >= CHIP_TOPAZ)
                        result = AMDGPU_UCODE_ID_CP_MEC1;
                else
                        result = AMDGPU_UCODE_ID_CP_MEC2;
                break;
        case CGS_UCODE_ID_RLC_G:
                result = AMDGPU_UCODE_ID_RLC_G;
                break;
        case CGS_UCODE_ID_STORAGE:
                result = AMDGPU_UCODE_ID_STORAGE;
                break;
        default:
                DRM_ERROR("Firmware type not supported\n");
        }
        return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
        CGS_FUNC_ADEV;
        if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
                release_firmware(adev->pm.fw);
                adev->pm.fw = NULL;
                return 0;
        }
        /* cannot release other firmware because it is not created by CGS */
        return -EINVAL;
}

static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
                                        enum cgs_ucode_id type)
{
        CGS_FUNC_ADEV;
        uint16_t fw_version = 0;

        switch (type) {
                case CGS_UCODE_ID_SDMA0:
                        fw_version = adev->sdma.instance[0].fw_version;
                        break;
                case CGS_UCODE_ID_SDMA1:
                        fw_version = adev->sdma.instance[1].fw_version;
                        break;
                case CGS_UCODE_ID_CP_CE:
                        fw_version = adev->gfx.ce_fw_version;
                        break;
                case CGS_UCODE_ID_CP_PFP:
                        fw_version = adev->gfx.pfp_fw_version;
                        break;
                case CGS_UCODE_ID_CP_ME:
                        fw_version = adev->gfx.me_fw_version;
                        break;
                case CGS_UCODE_ID_CP_MEC:
                        fw_version = adev->gfx.mec_fw_version;
                        break;
                case CGS_UCODE_ID_CP_MEC_JT1:
                        fw_version = adev->gfx.mec_fw_version;
                        break;
                case CGS_UCODE_ID_CP_MEC_JT2:
                        fw_version = adev->gfx.mec_fw_version;
                        break;
                case CGS_UCODE_ID_RLC_G:
                        fw_version = adev->gfx.rlc_fw_version;
                        break;
                case CGS_UCODE_ID_STORAGE:
                        break;
                default:
                        DRM_ERROR("firmware type %d does not have a version\n", type);
                        break;
        }
        return fw_version;
}

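/* Enter or exit RLC safe mode, if this ASIC's RLC provides the hooks. */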
static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
                                        bool en)
{
        CGS_FUNC_ADEV;

        if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
                adev->gfx.rlc.funcs->exit_safe_mode == NULL)
                return 0;

        if (en)
                adev->gfx.rlc.funcs->enter_safe_mode(adev);
        else
                adev->gfx.rlc.funcs->exit_safe_mode(adev);

        return 0;
}

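/*
 * Report firmware information to a CGS client.  For non-SMU ucode types
 * the already-loaded amdgpu firmware is described; for SMU/SMU_SK the SMC
 * firmware is requested and validated here, with the file name chosen per
 * ASIC (and per PCI device/revision ID for the "kicker" variants).
 */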
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                        enum cgs_ucode_id type,
                                        struct cgs_firmware_info *info)
{
        CGS_FUNC_ADEV;

        if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
                uint64_t gpu_addr;
                uint32_t data_size;
                const struct gfx_firmware_header_v1_0 *header;
                enum AMDGPU_UCODE_ID id;
                struct amdgpu_firmware_info *ucode;

                id = fw_type_convert(cgs_device, type);
                ucode = &adev->firmware.ucode[id];
                if (ucode->fw == NULL)
                        return -EINVAL;

                gpu_addr  = ucode->mc_addr;
                header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
                data_size = le32_to_cpu(header->header.ucode_size_bytes);

                if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
                    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
                        gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
                        data_size = le32_to_cpu(header->jt_size) << 2;
                }

                info->kptr = ucode->kaddr;
                info->image_size = data_size;
                info->mc_addr = gpu_addr;
                info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

                if (CGS_UCODE_ID_CP_MEC == type)
                        info->image_size = (header->jt_offset) << 2;

                info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
                info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
        } else {
                char fw_name[30] = {0};
                int err = 0;
                uint32_t ucode_size;
                uint32_t ucode_start_address;
                const uint8_t *src;
                const struct smc_firmware_header_v1_0 *hdr;
                const struct common_firmware_header *header;
                struct amdgpu_firmware_info *ucode = NULL;

                if (!adev->pm.fw) {
                        switch (adev->asic_type) {
                        case CHIP_TOPAZ:
                                if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
                                    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
                                    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
                                        info->is_kicker = true;
                                        strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
                                } else
                                        strcpy(fw_name, "amdgpu/topaz_smc.bin");
                                break;
                        case CHIP_TONGA:
                                if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
                                    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
                                        info->is_kicker = true;
                                        strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
                                } else
                                        strcpy(fw_name, "amdgpu/tonga_smc.bin");
                                break;
                        case CHIP_FIJI:
                                strcpy(fw_name, "amdgpu/fiji_smc.bin");
                                break;
                        case CHIP_POLARIS11:
                                if (type == CGS_UCODE_ID_SMU) {
                                        if (((adev->pdev->device == 0x67ef) &&
                                             ((adev->pdev->revision == 0xe0) ||
                                              (adev->pdev->revision == 0xe2) ||
                                              (adev->pdev->revision == 0xe5))) ||
                                            ((adev->pdev->device == 0x67ff) &&
                                             ((adev->pdev->revision == 0xcf) ||
                                              (adev->pdev->revision == 0xef) ||
                                              (adev->pdev->revision == 0xff)))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
                                        } else
                                                strcpy(fw_name, "amdgpu/polaris11_smc.bin");
                                } else if (type == CGS_UCODE_ID_SMU_SK) {
                                        strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
                                }
                                break;
                        case CHIP_POLARIS10:
                                if (type == CGS_UCODE_ID_SMU) {
                                        if ((adev->pdev->device == 0x67df) &&
                                            ((adev->pdev->revision == 0xe0) ||
                                             (adev->pdev->revision == 0xe3) ||
                                             (adev->pdev->revision == 0xe4) ||
                                             (adev->pdev->revision == 0xe5) ||
                                             (adev->pdev->revision == 0xe7) ||
                                             (adev->pdev->revision == 0xef))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
                                        } else
                                                strcpy(fw_name, "amdgpu/polaris10_smc.bin");
                                } else if (type == CGS_UCODE_ID_SMU_SK) {
                                        strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
                                }
                                break;
                        case CHIP_POLARIS12:
                                strcpy(fw_name, "amdgpu/polaris12_smc.bin");
                                break;
                        case CHIP_VEGA10:
                                strcpy(fw_name, "amdgpu/vega10_smc.bin");
                                break;
                        default:
                                DRM_ERROR("SMC firmware not supported\n");
                                return -EINVAL;
                        }

                        err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
                        if (err) {
                                DRM_ERROR("Failed to request firmware\n");
                                return err;
                        }

                        err = amdgpu_ucode_validate(adev->pm.fw);
                        if (err) {
                                DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
                                release_firmware(adev->pm.fw);
                                adev->pm.fw = NULL;
                                return err;
                        }

                        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                                ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
                                ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
                                ucode->fw = adev->pm.fw;
                                header = (const struct common_firmware_header *)ucode->fw->data;
                                adev->firmware.fw_size +=
                                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
                        }
                }

                hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
                amdgpu_ucode_print_smc_hdr(&hdr->header);
                adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
                ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
                ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
                src = (const uint8_t *)(adev->pm.fw->data +
                       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                info->version = adev->pm.fw_version;
                info->image_size = ucode_size;
                info->ucode_start_address = ucode_start_address;
                info->kptr = (void *)src;
        }
        return 0;
}

static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
        CGS_FUNC_ADEV;
        return amdgpu_sriov_vf(adev);
}

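/* Answer CGS_SYSTEM_INFO_* queries (PCI IDs, link caps, CG/PG flags, CU/SE counts). */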
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
                                        struct cgs_system_info *sys_info)
{
        CGS_FUNC_ADEV;

        if (NULL == sys_info)
                return -ENODEV;

        if (sizeof(struct cgs_system_info) != sys_info->size)
                return -ENODEV;

        switch (sys_info->info_id) {
        case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
                sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
                break;
        case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
                sys_info->value = adev->pm.pcie_gen_mask;
                break;
        case CGS_SYSTEM_INFO_PCIE_MLW:
                sys_info->value = adev->pm.pcie_mlw_mask;
                break;
        case CGS_SYSTEM_INFO_PCIE_DEV:
                sys_info->value = adev->pdev->device;
                break;
        case CGS_SYSTEM_INFO_PCIE_REV:
                sys_info->value = adev->pdev->revision;
                break;
        case CGS_SYSTEM_INFO_CG_FLAGS:
                sys_info->value = adev->cg_flags;
                break;
        case CGS_SYSTEM_INFO_PG_FLAGS:
                sys_info->value = adev->pg_flags;
                break;
        case CGS_SYSTEM_INFO_GFX_CU_INFO:
                sys_info->value = adev->gfx.cu_info.number;
                break;
        case CGS_SYSTEM_INFO_GFX_SE_INFO:
                sys_info->value = adev->gfx.config.max_shader_engines;
                break;
        case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
                sys_info->value = adev->pdev->subsystem_device;
                break;
        case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
                sys_info->value = adev->pdev->subsystem_vendor;
                break;
        default:
                return -ENODEV;
        }

        return 0;
}

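/*
 * Build a mask of the enabled CRTCs and, for the first active CRTC with a
 * valid hardware mode, fill in vblank time, refresh rate and reference clock.
 */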
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
                                          struct cgs_display_info *info)
{
        CGS_FUNC_ADEV;
        struct amdgpu_crtc *amdgpu_crtc;
        struct drm_device *ddev = adev->ddev;
        struct drm_crtc *crtc;
        uint32_t line_time_us, vblank_lines;
        struct cgs_mode_info *mode_info;

        if (info == NULL)
                return -EINVAL;

        mode_info = info->mode_info;
        if (mode_info) {
                /* if the displays are off, vblank time is max */
                mode_info->vblank_time_us = 0xffffffff;
                /* always set the reference clock */
                mode_info->ref_clock = adev->clock.spll.reference_freq;
        }

        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc,
                                &ddev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled) {
                                info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
                                info->display_count++;
                        }
                        if (mode_info != NULL &&
                                crtc->enabled && amdgpu_crtc->enabled &&
                                amdgpu_crtc->hw_mode.clock) {
                                line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
                                                        amdgpu_crtc->hw_mode.clock;
                                vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
                                                        amdgpu_crtc->hw_mode.crtc_vdisplay +
                                                        (amdgpu_crtc->v_border * 2);
                                mode_info->vblank_time_us = vblank_lines * line_time_us;
                                mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
                                mode_info->ref_clock = adev->clock.spll.reference_freq;
                                mode_info = NULL;
                        }
                }
        }

        return 0;
}


static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
        CGS_FUNC_ADEV;

        adev->pm.dpm_enabled = enabled;

        return 0;
}

/** \brief evaluate acpi namespace object, handle or pathname must be valid
 *  \param cgs_device
 *  \param info input/output arguments for the control method
 *  \return status
 */

#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
                                    struct cgs_acpi_method_info *info)
{
        CGS_FUNC_ADEV;
        acpi_handle handle;
        struct acpi_object_list input;
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *params, *obj;
        uint8_t name[5] = {'\0'};
        struct cgs_acpi_method_argument *argument;
        uint32_t i, count;
        acpi_status status;
        int result;

        handle = ACPI_HANDLE(&adev->pdev->dev);
        if (!handle)
                return -ENODEV;

        memset(&input, 0, sizeof(struct acpi_object_list));

        /* validate input info */
        if (info->size != sizeof(struct cgs_acpi_method_info))
                return -EINVAL;

        input.count = info->input_count;
        if (info->input_count > 0) {
                if (info->pinput_argument == NULL)
                        return -EINVAL;
                argument = info->pinput_argument;
                for (i = 0; i < info->input_count; i++) {
                        if (((argument->type == ACPI_TYPE_STRING) ||
                             (argument->type == ACPI_TYPE_BUFFER)) &&
                            (argument->pointer == NULL))
                                return -EINVAL;
                        argument++;
                }
        }

        if (info->output_count > 0) {
                if (info->poutput_argument == NULL)
                        return -EINVAL;
                argument = info->poutput_argument;
                for (i = 0; i < info->output_count; i++) {
                        if (((argument->type == ACPI_TYPE_STRING) ||
                                (argument->type == ACPI_TYPE_BUFFER))
                                && (argument->pointer == NULL))
                                return -EINVAL;
                        argument++;
                }
        }

        /* The path name passed to acpi_evaluate_object should be null terminated */
        if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
                strncpy(name, (char *)&(info->name), sizeof(uint32_t));
                name[4] = '\0';
        }

        /* parse input parameters */
        if (input.count > 0) {
                input.pointer = params =
                                kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
                if (params == NULL)
                        return -EINVAL;

                argument = info->pinput_argument;

                for (i = 0; i < input.count; i++) {
                        params->type = argument->type;
                        switch (params->type) {
                        case ACPI_TYPE_INTEGER:
                                params->integer.value = argument->value;
                                break;
                        case ACPI_TYPE_STRING:
                                params->string.length = argument->data_length;
                                params->string.pointer = argument->pointer;
                                break;
                        case ACPI_TYPE_BUFFER:
                                params->buffer.length = argument->data_length;
                                params->buffer.pointer = argument->pointer;
                                break;
                        default:
                                break;
                        }
                        params++;
                        argument++;
                }
        }

        /* parse output info */
        count = info->output_count;
        argument = info->poutput_argument;

        /* evaluate the acpi method */
        status = acpi_evaluate_object(handle, name, &input, &output);

        if (ACPI_FAILURE(status)) {
                result = -EIO;
                goto free_input;
        }

        /* return the output info */
        obj = output.pointer;

        if (count > 1) {
                if ((obj->type != ACPI_TYPE_PACKAGE) ||
                        (obj->package.count != count)) {
                        result = -EIO;
                        goto free_obj;
                }
                params = obj->package.elements;
        } else
                params = obj;

        if (params == NULL) {
                result = -EIO;
                goto free_obj;
        }

        for (i = 0; i < count; i++) {
                if (argument->type != params->type) {
                        result = -EIO;
                        goto free_obj;
                }
                switch (params->type) {
                case ACPI_TYPE_INTEGER:
                        argument->value = params->integer.value;
                        break;
                case ACPI_TYPE_STRING:
                        if ((params->string.length != argument->data_length) ||
                                (params->string.pointer == NULL)) {
                                result = -EIO;
                                goto free_obj;
                        }
                        strncpy(argument->pointer,
                                params->string.pointer,
                                params->string.length);
                        break;
                case ACPI_TYPE_BUFFER:
                        if (params->buffer.pointer == NULL) {
                                result = -EIO;
                                goto free_obj;
                        }
                        memcpy(argument->pointer,
                                params->buffer.pointer,
                                argument->data_length);
                        break;
                default:
                        break;
                }
                argument++;
                params++;
        }

        result = 0;
free_obj:
        kfree(obj);
free_input:
        kfree((void *)input.pointer);
        return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
                                struct cgs_acpi_method_info *info)
{
        return -EIO;
}
#endif

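/*
 * Convenience wrapper: evaluate an ACPI method that takes an integer
 * function code plus an input buffer and returns a single output buffer.
 */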
static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
                                        uint32_t acpi_method,
                                        uint32_t acpi_function,
                                        void *pinput, void *poutput,
                                        uint32_t output_count,
                                        uint32_t input_size,
                                        uint32_t output_size)
{
        struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
        struct cgs_acpi_method_argument acpi_output = {0};
        struct cgs_acpi_method_info info = {0};

        acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
        acpi_input[0].data_length = sizeof(uint32_t);
        acpi_input[0].value = acpi_function;

        acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
        acpi_input[1].data_length = input_size;
        acpi_input[1].pointer = pinput;

        acpi_output.type = CGS_ACPI_TYPE_BUFFER;
        acpi_output.data_length = output_size;
        acpi_output.pointer = poutput;

        info.size = sizeof(struct cgs_acpi_method_info);
        info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
        info.input_count = 2;
        info.name = acpi_method;
        info.pinput_argument = acpi_input;
        info.output_count = output_count;
        info.poutput_argument = &acpi_output;

        return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}

static const struct cgs_ops amdgpu_cgs_ops = {
        .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
        .free_gpu_mem = amdgpu_cgs_free_gpu_mem,
        .gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
        .gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
        .kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
        .kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
        .read_register = amdgpu_cgs_read_register,
        .write_register = amdgpu_cgs_write_register,
        .read_ind_register = amdgpu_cgs_read_ind_register,
        .write_ind_register = amdgpu_cgs_write_ind_register,
        .get_pci_resource = amdgpu_cgs_get_pci_resource,
        .atom_get_data_table = amdgpu_cgs_atom_get_data_table,
        .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
        .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
        .get_firmware_info = amdgpu_cgs_get_firmware_info,
        .rel_firmware = amdgpu_cgs_rel_firmware,
        .set_powergating_state = amdgpu_cgs_set_powergating_state,
        .set_clockgating_state = amdgpu_cgs_set_clockgating_state,
        .get_active_displays_info = amdgpu_cgs_get_active_displays_info,
        .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
        .call_acpi_method = amdgpu_cgs_call_acpi_method,
        .query_system_info = amdgpu_cgs_query_system_info,
        .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
        .enter_safe_mode = amdgpu_cgs_enter_safe_mode,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
        .add_irq_source = amdgpu_cgs_add_irq_source,
        .irq_get = amdgpu_cgs_irq_get,
        .irq_put = amdgpu_cgs_irq_put
};

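/*
 * Create the CGS wrapper for an amdgpu device; callers release it again
 * with amdgpu_cgs_destroy_device().
 */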
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
        struct amdgpu_cgs_device *cgs_device =
                kmalloc(sizeof(*cgs_device), GFP_KERNEL);

        if (!cgs_device) {
                DRM_ERROR("Couldn't allocate CGS device structure\n");
                return NULL;
        }

        cgs_device->base.ops = &amdgpu_cgs_ops;
        cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
        cgs_device->adev = adev;

        return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
        kfree(cgs_device);
}