/* linux/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c */
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "amdgpu.h"
  24#include "amdgpu_ras.h"
  25#include "mmhub_v1_0.h"
  26
  27#include "mmhub/mmhub_1_0_offset.h"
  28#include "mmhub/mmhub_1_0_sh_mask.h"
  29#include "mmhub/mmhub_1_0_default.h"
  30#include "vega10_enum.h"
  31#include "soc15.h"
  32#include "soc15_common.h"
  33
  34#define mmDAGB0_CNTL_MISC2_RV 0x008f
  35#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
  36
  37static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
  38{
  39        u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
  40        u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
  41
  42        base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
  43        base <<= 24;
  44
  45        top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
  46        top <<= 24;
  47
  48        adev->gmc.fb_start = base;
  49        adev->gmc.fb_end = top;
  50
  51        return base;
  52}
  53
  54static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
  55                                uint64_t page_table_base)
  56{
  57        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
  58
  59        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
  60                            hub->ctx_addr_distance * vmid,
  61                            lower_32_bits(page_table_base));
  62
  63        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
  64                            hub->ctx_addr_distance * vmid,
  65                            upper_32_bits(page_table_base));
  66}
  67
  68static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
  69{
  70        uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
  71
  72        mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
  73
  74        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
  75                     (u32)(adev->gmc.gart_start >> 12));
  76        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
  77                     (u32)(adev->gmc.gart_start >> 44));
  78
  79        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
  80                     (u32)(adev->gmc.gart_end >> 12));
  81        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
  82                     (u32)(adev->gmc.gart_end >> 44));
  83}
  84
/*
 * Program the AGP and system apertures plus the default/fault page
 * addresses.  Under SRIOV only the aperture range registers are written;
 * the rest are owned by the host.
 */
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Program the AGP BAR */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max((adev->gmc.fb_end >> 18) + 0x1,
				 adev->gmc.agp_end >> 18));
	else
		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	/* The registers below are programmed by the host when SRIOV is active. */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Set default page address. */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault": faults are redirected to the dummy page. */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}
 134
 135static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
 136{
 137        uint32_t tmp;
 138
 139        /* Setup TLB control */
 140        tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
 141
 142        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
 143        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
 144        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
 145                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
 146        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
 147                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
 148        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
 149        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
 150                            MTYPE, MTYPE_UC);/* XXX for emulation. */
 151        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
 152
 153        WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
 154}
 155
 156static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 157{
 158        uint32_t tmp;
 159
 160        if (amdgpu_sriov_vf(adev))
 161                return;
 162
 163        /* Setup L2 cache */
 164        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
 165        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
 166        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
 167        /* XXX for emulation, Refer to closed source code.*/
 168        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
 169                            0);
 170        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
 171        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 172        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
 173        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
 174
 175        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
 176        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 177        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 178        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
 179
 180        if (adev->gmc.translate_further) {
 181                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
 182                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
 183                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
 184        } else {
 185                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
 186                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
 187                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
 188        }
 189        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
 190
 191        tmp = mmVM_L2_CNTL4_DEFAULT;
 192        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
 193        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
 194        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
 195}
 196
 197static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
 198{
 199        uint32_t tmp;
 200
 201        tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
 202        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 203        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
 204        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
 205                            RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 206        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
 207}
 208
 209static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
 210{
 211        if (amdgpu_sriov_vf(adev))
 212                return;
 213
 214        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
 215                     0XFFFFFFFF);
 216        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
 217                     0x0000000F);
 218
 219        WREG32_SOC15(MMHUB, 0,
 220                     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
 221        WREG32_SOC15(MMHUB, 0,
 222                     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
 223
 224        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
 225                     0);
 226        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
 227                     0);
 228}
 229
/*
 * Configure VM contexts 1..15 (user VMIDs): page-table depth, block size,
 * all protection-fault defaults, and a full-range page-table aperture.
 */
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	/* translate_further trades one tree level for a larger block size. */
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	/* Contexts 1..15 (context 0 is the system domain). */
	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		/* Aperture covers the whole VM space: [0, max_pfn - 1]. */
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}
 285
 286static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 287{
 288        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 289        unsigned i;
 290
 291        for (i = 0; i < 18; ++i) {
 292                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
 293                                    i * hub->eng_addr_distance, 0xffffffff);
 294                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
 295                                    i * hub->eng_addr_distance, 0x1f);
 296        }
 297}
 298
 299static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
 300                                bool enable)
 301{
 302        if (amdgpu_sriov_vf(adev))
 303                return;
 304
 305        if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
 306                amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
 307
 308        }
 309}
 310
/*
 * Bring up the MMHUB GART: program apertures, TLB, L2 cache, system
 * domain, identity aperture, per-VMID config and invalidation engines.
 * Returns 0 (the sequence has no failure path).
 */
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers so vbios post doesn't program them, for
		 * SRIOV driver need to program them
		 */
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v1_0_init_gart_aperture_regs(adev);
	mmhub_v1_0_init_system_aperture_regs(adev);
	mmhub_v1_0_init_tlb_regs(adev);
	mmhub_v1_0_init_cache_regs(adev);

	mmhub_v1_0_enable_system_domain(adev);
	mmhub_v1_0_disable_identity_aperture(adev);
	mmhub_v1_0_setup_vmid_config(adev);
	mmhub_v1_0_program_invalidation(adev);

	return 0;
}
 338
 339static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
 340{
 341        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 342        u32 tmp;
 343        u32 i;
 344
 345        /* Disable all tables */
 346        for (i = 0; i < AMDGPU_NUM_VMID; i++)
 347                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL,
 348                                    i * hub->ctx_distance, 0);
 349
 350        /* Setup TLB control */
 351        tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
 352        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
 353        tmp = REG_SET_FIELD(tmp,
 354                                MC_VM_MX_L1_TLB_CNTL,
 355                                ENABLE_ADVANCED_DRIVER_MODEL,
 356                                0);
 357        WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
 358
 359        if (!amdgpu_sriov_vf(adev)) {
 360                /* Setup L2 cache */
 361                tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
 362                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 363                WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
 364                WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
 365        }
 366}
 367
/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 *
 * When @value is false, faults are not redirected, and the driver crashes
 * the GPU on any (retry or no-retry) fault instead.  No-op under SRIOV
 * (host-owned register).
 */
static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Apply @value to every protection-fault class uniformly. */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		/* Without redirection, make faults fatal for debugging. */
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}

	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
 415
/*
 * Fill in the generic amdgpu_vmhub descriptor for this MMHUB instance:
 * register offsets used by common GMC/VM code, plus the register strides
 * between consecutive contexts and invalidation engines.
 */
static void mmhub_v1_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	/* Strides derived from the distance between context 0 and 1 /
	 * engine 0 and 1 register banks. */
	hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
 446
/*
 * Enable/disable medium-grain clock gating (MGCG) for the MMHUB.
 *
 * Raven has only DAGB0 and at a different register offset
 * (mmDAGB0_CNTL_MISC2_RV); all other ASICs have DAGB0 and DAGB1 at the
 * standard offsets.  Registers are only written when their value changed.
 */
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

	def  = data  = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (adev->asic_type != CHIP_RAVEN) {
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
	} else
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		/* Enable CG: set master enable, clear all DISABLE_*_CG bits. */
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		/* Disable CG: clear master enable, set all DISABLE_*_CG bits. */
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

	if (def1 != data1) {
		if (adev->asic_type != CHIP_RAVEN)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		else
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
	}

	if (adev->asic_type != CHIP_RAVEN && def2 != data2)
		WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}
 509
 510static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
 511                                                       bool enable)
 512{
 513        uint32_t def, data;
 514
 515        def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
 516
 517        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
 518                data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
 519        else
 520                data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
 521
 522        if (def != data)
 523                WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
 524}
 525
 526static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 527                               enum amd_clockgating_state state)
 528{
 529        if (amdgpu_sriov_vf(adev))
 530                return 0;
 531
 532        switch (adev->asic_type) {
 533        case CHIP_VEGA10:
 534        case CHIP_VEGA12:
 535        case CHIP_VEGA20:
 536        case CHIP_RAVEN:
 537        case CHIP_RENOIR:
 538                mmhub_v1_0_update_medium_grain_clock_gating(adev,
 539                                state == AMD_CG_STATE_GATE);
 540                mmhub_v1_0_update_medium_grain_light_sleep(adev,
 541                                state == AMD_CG_STATE_GATE);
 542                break;
 543        default:
 544                break;
 545        }
 546
 547        return 0;
 548}
 549
/*
 * Report the currently active MMHUB clockgating features by OR-ing
 * AMD_CG_SUPPORT_* bits into @flags.  Under SRIOV, @flags is cleared
 * first (note: execution then falls through and still reads the
 * registers — presumably harmless on a VF; TODO confirm intent).
 */
static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
	int data, data1;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	/* NOTE(review): reads the non-Raven DAGB0 offset unconditionally,
	 * unlike the update path which uses mmDAGB0_CNTL_MISC2_RV on Raven —
	 * verify this is intended for Raven. */
	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	/* AMD_CG_SUPPORT_MC_MGCG: master enable set and no per-path CG disabled. */
	if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
 575
/*
 * RAS ECC error-counter field map for the MMHUB EA (efficiency arbiter)
 * blocks on Vega20: each entry names a memory, the EDC_CNT register it
 * lives in, and the SEC (correctable) / DED (uncorrectable) count fields.
 * Entries with a zero DED mask expose only a SED (single-error-detect)
 * count.
 */
static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	/* MMEA0 GMI-link counters live in the second EDC_CNT register. */
	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	/* Second EA instance (MMEA1), same layout as MMEA0. */
	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	}
};
 698
/*
 * EDC (error detection and correction) count registers polled for RAS error
 * accounting: the CNT/CNT2 registers of both MMEA0 and MMEA1 (Vega20 layout).
 * Reading these registers also clears them (see
 * mmhub_v1_0_reset_ras_error_count).
 */
static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
};
 705
 706static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
 707        const struct soc15_reg_entry *reg,
 708        uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
 709{
 710        uint32_t i;
 711        uint32_t sec_cnt, ded_cnt;
 712
 713        for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
 714                if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
 715                        continue;
 716
 717                sec_cnt = (value &
 718                                mmhub_v1_0_ras_fields[i].sec_count_mask) >>
 719                                mmhub_v1_0_ras_fields[i].sec_count_shift;
 720                if (sec_cnt) {
 721                        dev_info(adev->dev,
 722                                "MMHUB SubBlock %s, SEC %d\n",
 723                                mmhub_v1_0_ras_fields[i].name,
 724                                sec_cnt);
 725                        *sec_count += sec_cnt;
 726                }
 727
 728                ded_cnt = (value &
 729                                mmhub_v1_0_ras_fields[i].ded_count_mask) >>
 730                                mmhub_v1_0_ras_fields[i].ded_count_shift;
 731                if (ded_cnt) {
 732                        dev_info(adev->dev,
 733                                "MMHUB SubBlock %s, DED %d\n",
 734                                mmhub_v1_0_ras_fields[i].name,
 735                                ded_cnt);
 736                        *ded_count += ded_cnt;
 737                }
 738        }
 739
 740        return 0;
 741}
 742
 743static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
 744                                           void *ras_error_status)
 745{
 746        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
 747        uint32_t sec_count = 0, ded_count = 0;
 748        uint32_t i;
 749        uint32_t reg_value;
 750
 751        err_data->ue_count = 0;
 752        err_data->ce_count = 0;
 753
 754        for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
 755                reg_value =
 756                        RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
 757                if (reg_value)
 758                        mmhub_v1_0_get_ras_error_count(adev,
 759                                &mmhub_v1_0_edc_cnt_regs[i],
 760                                reg_value, &sec_count, &ded_count);
 761        }
 762
 763        err_data->ce_count += sec_count;
 764        err_data->ue_count += ded_count;
 765}
 766
 767static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
 768{
 769        uint32_t i;
 770
 771        /* read back edc counter registers to reset the counters to 0 */
 772        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
 773                for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
 774                        RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
 775        }
 776}
 777
/* RAS callbacks for MMHUB v1.0: generic late-init/fini from the amdgpu RAS
 * core, plus the v1.0-specific error query/reset implementations above. */
const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs = {
	.ras_late_init = amdgpu_mmhub_ras_late_init,
	.ras_fini = amdgpu_mmhub_ras_fini,
	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};
 784
/* MMHUB v1.0 hardware-abstraction vtable: FB location, GART setup/teardown,
 * fault handling, clock/power gating and per-VM page-table programming.
 * The referenced handlers are defined earlier in this file. */
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
	.get_fb_location = mmhub_v1_0_get_fb_location,
	.init = mmhub_v1_0_init,
	.gart_enable = mmhub_v1_0_gart_enable,
	.set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
	.gart_disable = mmhub_v1_0_gart_disable,
	.set_clockgating = mmhub_v1_0_set_clockgating,
	.get_clockgating = mmhub_v1_0_get_clockgating,
	.setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
	.update_power_gating = mmhub_v1_0_update_power_gating,
};
 796