linux/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"

#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"

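/*
 * Registers with an smn prefix live on the SMN (System Management Network)
 * bus rather than in the NBIO MMIO aperture, so this file accesses them
 * indirectly via RREG32_PCIE()/WREG32_PCIE() through the PCIE index/data
 * register pair exposed below.
 */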
#define smnNBIF_MGCG_CTRL_LCLK  0x1013a21c

#define smnCPM_CONTROL          0x11180460
#define smnPCIE_CNTL2           0x11180070
#define smnPCIE_CI_CNTL         0x11180080

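/* Extract the ATI revision ID strap for dev0/function0 from the RCC strap register. */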
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
        u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

        tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
        tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

        return tmp;
}

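/* Gate host access to the framebuffer: toggle FB reads and writes in BIF. */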
static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
        if (enable)
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
                        BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
        else
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

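/*
 * Flush the HDP write cache. When the ring provides an emit_wreg hook, the
 * flush is emitted as a ring packet so it is ordered with the ring's other
 * commands; otherwise the register is written directly over MMIO.
 */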
static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
                                struct amdgpu_ring *ring)
{
        if (!ring || !ring->funcs->emit_wreg)
                WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
        else
                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
                        NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
}

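/* Return the framebuffer size (in MB) reported by RCC_CONFIG_MEMSIZE. */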
static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
        return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}

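/*
 * Program the BIF doorbell aperture for the given SDMA instance: OFFSET
 * selects where the instance's doorbells start and SIZE enables the range
 * (0 disables it). The BIF_SDMA0_DOORBELL_RANGE field definitions are
 * reused for instance 1 since both registers share the same bit layout.
 */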
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
                                          bool use_doorbell, int doorbell_index)
{
        u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
                        SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
        u32 doorbell_range = RREG32(reg);

        if (use_doorbell) {
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
        } else {
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
        }

        WREG32(reg, doorbell_range);
}

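/* Enable or disable the BIF doorbell aperture as a whole. */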
static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
                                               bool enable)
{
        WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

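/*
 * Enable the doorbell self-ring aperture and point it at the device's
 * doorbell BAR, so that doorbell writes generated by the GPU itself are
 * routed back into its own doorbell space; writing 0 to the CNTL register
 * disables the aperture again.
 */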
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
                                                        bool enable)
{
        u32 tmp = 0;

        if (enable) {
                tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
                      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
                      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

                WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
                             lower_32_bits(adev->doorbell.base));
                WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
                             upper_32_bits(adev->doorbell.base));
        }

        WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

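/* Same idea as the SDMA ranges above, but for the interrupt handler (IH) ring. */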
static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
                                        bool use_doorbell, int doorbell_index)
{
        u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

        if (use_doorbell) {
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
        } else {
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
        }

        WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

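/*
 * Medium grain clock gating is not implemented for NBIO v7.4 yet; the stub
 * exists so the common code can invoke the callback unconditionally.
 */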
static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                       bool enable)
{
        /* TODO: Add support for v7.4 */
}

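/*
 * Toggle BIF light sleep: when enabled (and the device advertises
 * AMD_CG_SUPPORT_BIF_LS), the PCIe slave, master and replay memories are
 * allowed to enter a low power state. The register is only written back
 * when the value actually changes.
 */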
static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                      bool enable)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnPCIE_CNTL2);
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
                data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
                         PCIE_CNTL2__MST_MEM_LS_EN_MASK |
                         PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
        } else {
                data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
                          PCIE_CNTL2__MST_MEM_LS_EN_MASK |
                          PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
        }

        if (def != data)
                WREG32_PCIE(smnPCIE_CNTL2, data);
}

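/* Report which BIF clockgating features are currently live in hardware. */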
static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
                                            u32 *flags)
{
        int data;

        /* AMD_CG_SUPPORT_BIF_MGCG */
        data = RREG32_PCIE(smnCPM_CONTROL);
        if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_BIF_MGCG;

        /* AMD_CG_SUPPORT_BIF_LS */
        data = RREG32_PCIE(smnPCIE_CNTL2);
        if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_BIF_LS;
}

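/*
 * One-time interrupt delivery setup: program the dummy page address used
 * for the IH dummy read, and leave dummy read override and non-snooped IH
 * requests disabled, since the IH ring lives in snooped system memory.
 */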
static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
        u32 interrupt_cntl;

        /* setup interrupt control */
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
        interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
        /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
         * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
         */
        interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
        /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
        interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

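/*
 * Offsets of the HDP flush request/done registers and of the PCIE
 * index/data pair, handed to the common amdgpu code so it can drive these
 * registers without knowing the NBIO v7.4 layout.
 */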
static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

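/*
 * Per-client bits in GPU_HDP_FLUSH_REQ/GPU_HDP_FLUSH_DONE, used by the CP
 * and SDMA rings to request an HDP flush and poll for its completion.
 */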
static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
        .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
        .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
        .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
        .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
        .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
        .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
        .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
        .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
        .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
        .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
        .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
        .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

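/*
 * Classify the virtualization environment from RCC_IOV_FUNC_IDENTIFIER:
 * bit 0 means we are an SR-IOV virtual function, bit 31 that IOV is
 * enabled on the physical function. If the register reads 0 but we are
 * running inside a VM, assume passthrough mode.
 */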
static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
{
        uint32_t reg;

        reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
        if (reg & 1)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

        if (reg & 0x80000000)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

        if (!reg) {
                if (is_virtual_machine())       /* passthrough mode excludes sriov mode */
                        adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
        }
}

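/* One-time register setup: disable CI slave ordering in PCIE_CI_CNTL. */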
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
        data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);

        if (def != data)
                WREG32_PCIE(smnPCIE_CI_CNTL, data);
}

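/* Callback table plugged into the common amdgpu code for NBIO v7.4. */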
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
        .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
        .get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
        .get_rev_id = nbio_v7_4_get_rev_id,
        .mc_access_enable = nbio_v7_4_mc_access_enable,
        .hdp_flush = nbio_v7_4_hdp_flush,
        .get_memsize = nbio_v7_4_get_memsize,
        .sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
        .enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
        .enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
        .ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
        .update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
        .update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
        .get_clockgating_state = nbio_v7_4_get_clockgating_state,
        .ih_control = nbio_v7_4_ih_control,
        .init_registers = nbio_v7_4_init_registers,
        .detect_hw_virt = nbio_v7_4_detect_hw_virt,
};