linux/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_0.h"

#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"

#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0

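/**
 * mmhub_v1_0_get_fb_location - read back the FB aperture from the MMHUB
 *
 * @adev: amdgpu_device pointer
 *
 * Reads MC_VM_FB_LOCATION_BASE/TOP, which hold bits 47:24 of the addresses,
 * caches the byte addresses in adev->gmc.fb_start/fb_end and returns the
 * FB base address.
 */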
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
        u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
        u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);

        base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
        base <<= 24;

        top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
        top <<= 24;

        adev->gmc.fb_start = base;
        adev->gmc.fb_end = top;

        return base;
}

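/**
 * mmhub_v1_0_setup_vm_pt_regs - program the page table base for a VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: VMID whose context registers are written
 * @page_table_base: GPU address of the root page directory
 */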
void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base)
{
        /* two-register stride between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
        int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
                        - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
                        offset * vmid, lower_32_bits(page_table_base));

        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
                        offset * vmid, upper_32_bits(page_table_base));
}

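/* Program context 0 with the GART page table base and address range. */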
static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
        uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);

        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
                     (u32)(adev->gmc.gart_start >> 12));
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
                     (u32)(adev->gmc.gart_start >> 44));

        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
                     (u32)(adev->gmc.gart_end >> 12));
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
                     (u32)(adev->gmc.gart_end >> 44));
}

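/* Program the AGP/system apertures, the default page and the fault default address. */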
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
        uint64_t value;
        uint32_t tmp;

        /* Program the AGP BAR */
        WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
        WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
        WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

        /* Program the system aperture low logical page number. */
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

        if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
                /*
                 * Raven2 has a HW issue that prevents it from using the VRAM
                 * that lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
                 * workaround, raise the system aperture high address by one
                 * to get rid of the VM fault and hardware hang.
                 */
                WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                             max((adev->gmc.fb_end >> 18) + 0x1,
                                 adev->gmc.agp_end >> 18));
        else
                WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                             max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
                adev->vm_manager.vram_base_offset;
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
                     (u32)(value >> 12));
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
                     (u32)(value >> 44));

        /* Program "protection fault". */
        WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
                     (u32)(adev->dummy_page_addr >> 12));
        WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
                     (u32)((u64)adev->dummy_page_addr >> 44));

        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
                            ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

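/* Enable the MC L1 TLB, advanced driver model and ATC; MTYPE is left at UC. */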
static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
        uint32_t tmp;

        /* Setup TLB control */
        tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);

        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC);/* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

        WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}

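/* Enable and configure the VM L2 cache (fragment processing, bank select, big-K size). */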
static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
        uint32_t tmp;

        /* Setup L2 cache */
        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
        /* XXX for emulation, Refer to closed source code.*/
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
                            0);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);

        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);

        /* Start from the VM_L2_CNTL3 default, not the stale VM_L2_CNTL2 value. */
        tmp = mmVM_L2_CNTL3_DEFAULT;
        if (adev->gmc.translate_further) {
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
        } else {
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
        }
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);

        tmp = mmVM_L2_CNTL4_DEFAULT;
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
}

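/* Enable VM context 0, the kernel/GART context, with page table depth 0. */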
static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
        uint32_t tmp;

        tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}

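/* Make the context1 identity aperture empty so it is never hit. */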
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
                     0xFFFFFFFF);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
                     0x0000000F);

        WREG32_SOC15(MMHUB, 0,
                     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
        WREG32_SOC15(MMHUB, 0,
                     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
                     0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
                     0);
}

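/* Program VM contexts 1-15: depth, block size, fault defaults and address range. */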
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
        unsigned num_level, block_size;
        uint32_t tmp;
        int i;

        num_level = adev->vm_manager.num_level;
        block_size = adev->vm_manager.block_size;
        if (adev->gmc.translate_further)
                num_level -= 1;
        else
                block_size -= 9;

        for (i = 0; i <= 14; i++) {
                tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
                                    num_level);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    PAGE_TABLE_BLOCK_SIZE,
                                    block_size);
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2,
                        lower_32_bits(adev->vm_manager.max_pfn - 1));
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2,
                        upper_32_bits(adev->vm_manager.max_pfn - 1));
        }
}

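/* Open the address range of all 18 invalidation engines to the full space. */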
static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
        unsigned i;

        for (i = 0; i < 18; ++i) {
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
                                    2 * i, 0xffffffff);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
                                    2 * i, 0x1f);
        }
}

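/**
 * mmhub_v1_0_update_power_gating - request MMHUB powergating from the SMU
 *
 * @adev: amdgpu_device pointer
 * @enable: true requests powergating when AMD_PG_SUPPORT_MMHUB is set
 */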
void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
                                bool enable)
{
        if (amdgpu_sriov_vf(adev))
                return;

        if (enable && (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) {
                if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu)
                        amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
        }
}

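/**
 * mmhub_v1_0_gart_enable - program the MMHUB for GART operation
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the FB location (SRIOV only), the apertures, TLB, L2 cache,
 * VMID contexts and invalidation engines.  Returns 0 on success.
 */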
int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev)) {
                /*
                 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
                 * VF copy registers and the vbios post doesn't program them.
                 * The SRIOV driver needs to program them itself.
                 */
                WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
                             adev->gmc.vram_start >> 24);
                WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
                             adev->gmc.vram_end >> 24);
        }

        /* GART Enable. */
        mmhub_v1_0_init_gart_aperture_regs(adev);
        mmhub_v1_0_init_system_aperture_regs(adev);
        mmhub_v1_0_init_tlb_regs(adev);
        mmhub_v1_0_init_cache_regs(adev);

        mmhub_v1_0_enable_system_domain(adev);
        mmhub_v1_0_disable_identity_aperture(adev);
        mmhub_v1_0_setup_vmid_config(adev);
        mmhub_v1_0_program_invalidation(adev);

        return 0;
}

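/**
 * mmhub_v1_0_gart_disable - tear down GART translation on the MMHUB
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts, the L1 TLB and the L2 cache.
 */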
void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
        u32 tmp;
        u32 i;

        /* Disable all tables */
        for (i = 0; i < 16; i++)
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL, i, 0);

        /* Setup TLB control */
        tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            ENABLE_ADVANCED_DRIVER_MODEL, 0);
        WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);

        /* Setup L2 cache */
        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
        u32 tmp;

        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
                        value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        if (!value) {
                tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                                CRASH_ON_NO_RETRY_FAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                                CRASH_ON_RETRY_FAULT, 1);
        }

        WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

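/**
 * mmhub_v1_0_init - fill in the MMHUB register offsets used by the VM code
 *
 * @adev: amdgpu_device pointer
 */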
void mmhub_v1_0_init(struct amdgpu_device *adev)
{
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];

        hub->ctx0_ptb_addr_lo32 =
                SOC15_REG_OFFSET(MMHUB, 0,
                                 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
        hub->ctx0_ptb_addr_hi32 =
                SOC15_REG_OFFSET(MMHUB, 0,
                                 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
        hub->vm_inv_eng0_req =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
        hub->vm_inv_eng0_ack =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
        hub->vm_context0_cntl =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
        hub->vm_l2_pro_fault_status =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
        hub->vm_l2_pro_fault_cntl =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
}

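/* Toggle MGCG in ATC L2 and the DAGB0/DAGB1 clients (Raven has no DAGB1). */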
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                        bool enable)
{
        uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

        def  = data  = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

        if (adev->asic_type != CHIP_RAVEN) {
                def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
                def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
        } else {
                def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);
        }

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
                data |= ATC_L2_MISC_CG__ENABLE_MASK;

                data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

                if (adev->asic_type != CHIP_RAVEN)
                        data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
        } else {
                data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

                data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

                if (adev->asic_type != CHIP_RAVEN)
                        data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
        }

        if (def != data)
                WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

        if (def1 != data1) {
                if (adev->asic_type != CHIP_RAVEN)
                        WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
                else
                        WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
        }

        if (adev->asic_type != CHIP_RAVEN && def2 != data2)
                WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}

static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                   bool enable)
{
        uint32_t def, data;

        def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
                data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
        else
                data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;

        if (def != data)
                WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
}

static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t def, data;

        def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
                data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
        else
                data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

        if (def != data)
                WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}

static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                  bool enable)
{
        uint32_t def, data;

        def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
            (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
                data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
        else
                data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;

        if (def != data)
                WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
}

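/**
 * mmhub_v1_0_set_clockgating - enable/disable MMHUB and ATHUB clockgating
 *
 * @adev: amdgpu_device pointer
 * @state: AMD_CG_STATE_GATE enables gating, AMD_CG_STATE_UNGATE disables it
 */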
int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
                               enum amd_clockgating_state state)
{
        if (amdgpu_sriov_vf(adev))
                return 0;

        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RAVEN:
                mmhub_v1_0_update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                athub_update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                mmhub_v1_0_update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                athub_update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                break;
        default:
                break;
        }

        return 0;
}

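/**
 * mmhub_v1_0_get_clockgating - report the currently enabled clockgating flags
 *
 * @adev: amdgpu_device pointer
 * @flags: AMD_CG_SUPPORT_* bits are ORed in for each feature found enabled
 */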
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
        int data;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        /* AMD_CG_SUPPORT_MC_MGCG */
        data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
        if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_MC_MGCG;

        /* AMD_CG_SUPPORT_MC_LS */
        data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
        if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_MC_LS;
}