/* linux/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c */
   1/*
   2 * Copyright 2019 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include "amdgpu.h"
  25#include "mmhub_v2_3.h"
  26
  27#include "mmhub/mmhub_2_3_0_offset.h"
  28#include "mmhub/mmhub_2_3_0_sh_mask.h"
  29#include "mmhub/mmhub_2_3_0_default.h"
  30#include "navi10_enum.h"
  31
  32#include "soc15_common.h"
  33
  34static const char *mmhub_client_ids_vangogh[][2] = {
  35        [0][0] = "MP0",
  36        [1][0] = "MP1",
  37        [2][0] = "DCEDMC",
  38        [3][0] = "DCEVGA",
  39        [13][0] = "UTCL2",
  40        [26][0] = "OSS",
  41        [27][0] = "HDP",
  42        [28][0] = "VCN",
  43        [29][0] = "VCNU",
  44        [30][0] = "JPEG",
  45        [0][1] = "MP0",
  46        [1][1] = "MP1",
  47        [2][1] = "DCEDMC",
  48        [3][1] = "DCEVGA",
  49        [4][1] = "DCEDWB",
  50        [5][1] = "XDP",
  51        [26][1] = "OSS",
  52        [27][1] = "HDP",
  53        [28][1] = "VCN",
  54        [29][1] = "VCNU",
  55        [30][1] = "JPEG",
  56};
  57
  58static uint32_t mmhub_v2_3_get_invalidate_req(unsigned int vmid,
  59                                              uint32_t flush_type)
  60{
  61        u32 req = 0;
  62
  63        /* invalidate using legacy mode on vmid*/
  64        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
  65                            PER_VMID_INVALIDATE_REQ, 1 << vmid);
  66        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
  67        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
  68        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
  69        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
  70        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
  71        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
  72        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
  73                            CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
  74
  75        return req;
  76}
  77
  78static void
  79mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
  80                                             uint32_t status)
  81{
  82        uint32_t cid, rw;
  83        const char *mmhub_cid = NULL;
  84
  85        cid = REG_GET_FIELD(status,
  86                            MMVM_L2_PROTECTION_FAULT_STATUS, CID);
  87        rw = REG_GET_FIELD(status,
  88                           MMVM_L2_PROTECTION_FAULT_STATUS, RW);
  89
  90        dev_err(adev->dev,
  91                "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
  92                status);
  93        switch (adev->asic_type) {
  94        case CHIP_VANGOGH:
  95        case CHIP_YELLOW_CARP:
  96                mmhub_cid = mmhub_client_ids_vangogh[cid][rw];
  97                break;
  98        default:
  99                mmhub_cid = NULL;
 100                break;
 101        }
 102        dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
 103                mmhub_cid ? mmhub_cid : "unknown", cid);
 104        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
 105                REG_GET_FIELD(status,
 106                MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
 107        dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
 108                REG_GET_FIELD(status,
 109                MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
 110        dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
 111                REG_GET_FIELD(status,
 112                MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
 113        dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
 114                REG_GET_FIELD(status,
 115                MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
 116        dev_err(adev->dev, "\t RW: 0x%x\n", rw);
 117}
 118
 119static void mmhub_v2_3_setup_vm_pt_regs(struct amdgpu_device *adev,
 120                                        uint32_t vmid,
 121                                        uint64_t page_table_base)
 122{
 123        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 124
 125        WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
 126                            hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));
 127
 128        WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
 129                            hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base));
 130}
 131
 132static void mmhub_v2_3_init_gart_aperture_regs(struct amdgpu_device *adev)
 133{
 134        uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 135
 136        mmhub_v2_3_setup_vm_pt_regs(adev, 0, pt_base);
 137
 138        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
 139                     (u32)(adev->gmc.gart_start >> 12));
 140        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
 141                     (u32)(adev->gmc.gart_start >> 44));
 142
 143        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
 144                     (u32)(adev->gmc.gart_end >> 12));
 145        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
 146                     (u32)(adev->gmc.gart_end >> 44));
 147}
 148
 149static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
 150{
 151        uint64_t value;
 152        uint32_t tmp;
 153
 154        /* Disable AGP. */
 155        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
 156        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
 157        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
 158
 159        /* Program the system aperture low logical page number. */
 160        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 161                     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 162        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 163                     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 164
 165        /* Set default page address. */
 166        value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
 167        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
 168                     (u32)(value >> 12));
 169        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
 170                     (u32)(value >> 44));
 171
 172        /* Program "protection fault". */
 173        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
 174                     (u32)(adev->dummy_page_addr >> 12));
 175        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
 176                     (u32)((u64)adev->dummy_page_addr >> 44));
 177
 178        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
 179        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
 180                            ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
 181        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
 182}
 183
 184static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev)
 185{
 186        uint32_t tmp;
 187
 188        /* Setup TLB control */
 189        tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
 190
 191        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
 192        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
 193        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
 194                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
 195        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
 196                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
 197        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
 198        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
 199                            MTYPE, MTYPE_UC); /* UC, uncached */
 200
 201        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
 202}
 203
 204static void mmhub_v2_3_init_cache_regs(struct amdgpu_device *adev)
 205{
 206        uint32_t tmp;
 207
 208        /* Setup L2 cache */
 209        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
 210        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
 211        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
 212        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
 213                            ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 214        /* XXX for emulation, Refer to closed source code.*/
 215        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
 216                            0);
 217        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
 218        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 219        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
 220        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
 221
 222        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2);
 223        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 224        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 225        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
 226
 227        tmp = mmMMVM_L2_CNTL3_DEFAULT;
 228        if (adev->gmc.translate_further) {
 229                tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
 230                tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
 231                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
 232        } else {
 233                tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
 234                tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
 235                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
 236        }
 237        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
 238
 239        tmp = mmMMVM_L2_CNTL4_DEFAULT;
 240        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
 241        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
 242        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp);
 243
 244        tmp = mmMMVM_L2_CNTL5_DEFAULT;
 245        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
 246        WREG32_SOC15(GC, 0, mmMMVM_L2_CNTL5, tmp);
 247}
 248
 249static void mmhub_v2_3_enable_system_domain(struct amdgpu_device *adev)
 250{
 251        uint32_t tmp;
 252
 253        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
 254        tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 255        tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
 256        tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
 257                            RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 258        WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
 259}
 260
 261static void mmhub_v2_3_disable_identity_aperture(struct amdgpu_device *adev)
 262{
 263        WREG32_SOC15(MMHUB, 0,
 264                     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
 265                     0xFFFFFFFF);
 266        WREG32_SOC15(MMHUB, 0,
 267                     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
 268                     0x0000000F);
 269
 270        WREG32_SOC15(MMHUB, 0,
 271                     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
 272        WREG32_SOC15(MMHUB, 0,
 273                     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
 274
 275        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
 276                     0);
 277        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
 278                     0);
 279}
 280
 281static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
 282{
 283        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 284        int i;
 285        uint32_t tmp;
 286
 287        for (i = 0; i <= 14; i++) {
 288                tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i);
 289                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 290                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
 291                                    adev->vm_manager.num_level);
 292                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
 293                                    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 294                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
 295                                    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
 296                                    1);
 297                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
 298                                    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 299                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
 300                                    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 301                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
 302                                    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 303                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
 304                                    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 305                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
 306                                    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 307                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
 308                                    PAGE_TABLE_BLOCK_SIZE,
 309                                    adev->vm_manager.block_size - 9);
 310                /* Send no-retry XNACK on fault to suppress VM fault storm. */
 311                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
 312                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
 313                                    !adev->gmc.noretry);
 314                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
 315                                    i * hub->ctx_distance, tmp);
 316                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
 317                                    i * hub->ctx_addr_distance, 0);
 318                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
 319                                    i * hub->ctx_addr_distance, 0);
 320                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
 321                                    i * hub->ctx_addr_distance,
 322                                    lower_32_bits(adev->vm_manager.max_pfn - 1));
 323                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
 324                                    i * hub->ctx_addr_distance,
 325                                    upper_32_bits(adev->vm_manager.max_pfn - 1));
 326        }
 327}
 328
 329static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev)
 330{
 331        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 332        unsigned i;
 333
 334        for (i = 0; i < 18; ++i) {
 335                WREG32_SOC15_OFFSET(MMHUB, 0,
 336                                    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
 337                                    i * hub->eng_addr_distance, 0xffffffff);
 338                WREG32_SOC15_OFFSET(MMHUB, 0,
 339                                    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
 340                                    i * hub->eng_addr_distance, 0x1f);
 341        }
 342}
 343
 344static int mmhub_v2_3_gart_enable(struct amdgpu_device *adev)
 345{
 346        if (amdgpu_sriov_vf(adev)) {
 347                /*
 348                 * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
 349                 * VF copy registers so vbios post doesn't program them, for
 350                 * SRIOV driver need to program them
 351                 */
 352                WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
 353                             adev->gmc.vram_start >> 24);
 354                WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
 355                             adev->gmc.vram_end >> 24);
 356        }
 357
 358        /* GART Enable. */
 359        mmhub_v2_3_init_gart_aperture_regs(adev);
 360        mmhub_v2_3_init_system_aperture_regs(adev);
 361        mmhub_v2_3_init_tlb_regs(adev);
 362        mmhub_v2_3_init_cache_regs(adev);
 363
 364        mmhub_v2_3_enable_system_domain(adev);
 365        mmhub_v2_3_disable_identity_aperture(adev);
 366        mmhub_v2_3_setup_vmid_config(adev);
 367        mmhub_v2_3_program_invalidation(adev);
 368
 369        return 0;
 370}
 371
 372static void mmhub_v2_3_gart_disable(struct amdgpu_device *adev)
 373{
 374        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 375        u32 tmp;
 376        u32 i;
 377
 378        /* Disable all tables */
 379        for (i = 0; i < AMDGPU_NUM_VMID; i++)
 380                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
 381                                    i * hub->ctx_distance, 0);
 382
 383        /* Setup TLB control */
 384        tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
 385        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
 386        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
 387                            ENABLE_ADVANCED_DRIVER_MODEL, 0);
 388        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
 389
 390        /* Setup L2 cache */
 391        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
 392        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
 393        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
 394        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0);
 395}
 396
 397/**
 398 * mmhub_v2_3_set_fault_enable_default - update GART/VM fault handling
 399 *
 400 * @adev: amdgpu_device pointer
 401 * @value: true redirects VM faults to the default page
 402 */
 403static void mmhub_v2_3_set_fault_enable_default(struct amdgpu_device *adev,
 404                                                bool value)
 405{
 406        u32 tmp;
 407        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
 408        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 409                            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 410        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 411                            PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 412        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 413                            PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 414        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 415                            PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 416        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 417                            TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
 418                            value);
 419        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 420                            NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 421        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 422                            DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 423        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 424                            VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 425        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 426                            READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 427        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 428                            WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 429        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 430                            EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 431        if (!value) {
 432                tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 433                                CRASH_ON_NO_RETRY_FAULT, 1);
 434                tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
 435                                CRASH_ON_RETRY_FAULT, 1);
 436        }
 437        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
 438}
 439
 440static const struct amdgpu_vmhub_funcs mmhub_v2_3_vmhub_funcs = {
 441        .print_l2_protection_fault_status = mmhub_v2_3_print_l2_protection_fault_status,
 442        .get_invalidate_req = mmhub_v2_3_get_invalidate_req,
 443};
 444
 445static void mmhub_v2_3_init(struct amdgpu_device *adev)
 446{
 447        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 448
 449        hub->ctx0_ptb_addr_lo32 =
 450                SOC15_REG_OFFSET(MMHUB, 0,
 451                                 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
 452        hub->ctx0_ptb_addr_hi32 =
 453                SOC15_REG_OFFSET(MMHUB, 0,
 454                                 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
 455        hub->vm_inv_eng0_sem =
 456                SOC15_REG_OFFSET(MMHUB, 0,
 457                                 mmMMVM_INVALIDATE_ENG0_SEM);
 458        hub->vm_inv_eng0_req =
 459                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
 460        hub->vm_inv_eng0_ack =
 461                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK);
 462        hub->vm_context0_cntl =
 463                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
 464        hub->vm_l2_pro_fault_status =
 465                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS);
 466        hub->vm_l2_pro_fault_cntl =
 467                SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
 468
 469        hub->ctx_distance = mmMMVM_CONTEXT1_CNTL - mmMMVM_CONTEXT0_CNTL;
 470        hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
 471                mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
 472        hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ -
 473                mmMMVM_INVALIDATE_ENG0_REQ;
 474        hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
 475                mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
 476
 477        hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 478                MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 479                MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 480                MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 481                MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 482                MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 483                MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
 484
 485        hub->vmhub_funcs = &mmhub_v2_3_vmhub_funcs;
 486}
 487
 488static void
 489mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 490                                            bool enable)
 491{
 492        uint32_t def, data, def1, data1;
 493
 494        def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
 495        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
 496
 497        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
 498                data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
 499                data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
 500                           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
 501                           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
 502                           DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
 503                           DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
 504                           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
 505
 506        } else {
 507                data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
 508                data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
 509                          DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
 510                          DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
 511                          DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
 512                          DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
 513                          DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
 514        }
 515
 516        if (def != data)
 517                WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
 518        if (def1 != data1)
 519                WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
 520}
 521
 522static void
 523mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
 524                                           bool enable)
 525{
 526        uint32_t def, data, def1, data1, def2, data2;
 527
 528        def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
 529        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
 530        def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
 531
 532        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
 533                data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
 534                data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
 535                        DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
 536                        DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
 537                        DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
 538                        DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
 539                data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
 540                        DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
 541                        DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
 542                        DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
 543                        DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
 544        } else {
 545                data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
 546                data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
 547                        DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
 548                        DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
 549                        DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
 550                        DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
 551                data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
 552                        DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
 553                        DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
 554                        DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
 555                        DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
 556        }
 557
 558        if (def != data)
 559                WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
 560        if (def1 != data1)
 561                WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
 562        if (def2 != data2)
 563                WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
 564}
 565
 566static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
 567                                      enum amd_clockgating_state state)
 568{
 569        if (amdgpu_sriov_vf(adev))
 570                return 0;
 571
 572        mmhub_v2_3_update_medium_grain_clock_gating(adev,
 573                                state == AMD_CG_STATE_GATE);
 574        mmhub_v2_3_update_medium_grain_light_sleep(adev,
 575                                state == AMD_CG_STATE_GATE);
 576
 577        return 0;
 578}
 579
 580static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 581{
 582        int data, data1, data2, data3;
 583
 584        if (amdgpu_sriov_vf(adev))
 585                *flags = 0;
 586
 587        data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
 588        data1  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
 589        data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
 590        data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
 591
 592        /* AMD_CG_SUPPORT_MC_MGCG */
 593        if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
 594                       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
 595                       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
 596                       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
 597                       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
 598                       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
 599                && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
 600                        *flags |= AMD_CG_SUPPORT_MC_MGCG;
 601        }
 602
 603        /* AMD_CG_SUPPORT_MC_LS */
 604        if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
 605                && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
 606                                DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
 607                                DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
 608                                DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
 609                                DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
 610                && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
 611                                DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
 612                                DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
 613                                DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
 614                                DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
 615                *flags |= AMD_CG_SUPPORT_MC_LS;
 616}
 617
 618const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs = {
 619        .init = mmhub_v2_3_init,
 620        .gart_enable = mmhub_v2_3_gart_enable,
 621        .set_fault_enable_default = mmhub_v2_3_set_fault_enable_default,
 622        .gart_disable = mmhub_v2_3_gart_disable,
 623        .set_clockgating = mmhub_v2_3_set_clockgating,
 624        .get_clockgating = mmhub_v2_3_get_clockgating,
 625        .setup_vm_pt_regs = mmhub_v2_3_setup_vm_pt_regs,
 626};
 627