linux/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"

#include "nbio/nbio_6_1_default.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "vega10_enum.h"

#define smnCPM_CONTROL                                                                                  0x11180460
#define smnPCIE_CNTL2                                                                                   0x11180070
#define smnPCIE_CONFIG_CNTL                                                                             0x11180044
#define smnPCIE_CI_CNTL                                                                                 0x11180080

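/* Extract the ATI revision ID field from the DEV0/EPF0 strap register. */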
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
        u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

        tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
        tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

        return tmp;
}

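/*
 * Enable or disable framebuffer (VRAM) access through the BIF; typically
 * toggled while the memory controller is being reprogrammed.
 */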
static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
        if (enable)
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
                             BIF_FB_EN__FB_READ_EN_MASK |
                             BIF_FB_EN__FB_WRITE_EN_MASK);
        else
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

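/*
 * Flush the HDP write path by writing 0 to the HDP memory coherency flush
 * control register: directly via MMIO when no ring (or no emit_wreg
 * support) is available, otherwise by emitting the write on the ring.
 */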
static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
                                struct amdgpu_ring *ring)
{
        if (!ring || !ring->funcs->emit_wreg)
                WREG32_SOC15_NO_KIQ(NBIO, 0,
                                    mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
                                    0);
        else
                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
                        NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
}

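/* RCC_CONFIG_MEMSIZE reports the visible VRAM size (in MB on this ASIC). */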
static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
        return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
}

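/*
 * Program the BIF doorbell range for one SDMA instance: point it at
 * doorbell_index and set a non-zero SIZE to enable it, or SIZE = 0 to
 * disable it. The BIF_SDMA0_DOORBELL_RANGE field layout is reused for the
 * SDMA1 register, which shares the same format.
 */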
static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
                                          bool use_doorbell, int doorbell_index)
{
        u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
                        SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);

        u32 doorbell_range = RREG32(reg);

        if (use_doorbell) {
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
        } else {
                doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
        }

        WREG32(reg, doorbell_range);
}

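/* Master enable for the doorbell aperture on this function. */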
static void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
                                               bool enable)
{
        WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

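/*
 * The self-ring aperture lets the chip target its own doorbell BAR. When
 * enabling, point the aperture's guest physical base at adev->doorbell.base;
 * when disabling, just clear the control register.
 */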
static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
                                                        bool enable)
{
        u32 tmp = 0;

        if (enable) {
                tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
                      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
                      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

                WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
                             lower_32_bits(adev->doorbell.base));
                WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
                             upper_32_bits(adev->doorbell.base));
        }

        WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

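/* Same pattern as the SDMA ranges above, but for the IH (interrupt) ring. */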
static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
                                        bool use_doorbell, int doorbell_index)
{
        u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

        if (use_doorbell) {
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
        } else {
                ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
        }

        WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

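/*
 * Basic interrupt-controller setup. INTERRUPT_CNTL2 takes the dummy-read
 * page address in 256-byte units (hence the >> 8); INTERRUPT_CNTL then
 * selects the dummy-read and snoop behaviour described inline below.
 */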
static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
{
        u32 interrupt_cntl;

        /* setup interrupt control */
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
        interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
        /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
         * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
         */
        interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
        /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
        interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

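/*
 * BIF medium-grain clock gating: set or clear the CPM_CONTROL gate enables
 * as one group, writing the register back only when the value changed.
 */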
static void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnCPM_CONTROL);
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
                data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
                         CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
                         CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
        } else {
                data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
                          CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
                          CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
        }

        if (def != data)
                WREG32_PCIE(smnCPM_CONTROL, data);
}

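/*
 * BIF memory light sleep, controlled by the three memory LS enables in
 * PCIE_CNTL2; same read-modify-conditional-write pattern as MGCG above.
 */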
static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                      bool enable)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnPCIE_CNTL2);
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
                data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
                         PCIE_CNTL2__MST_MEM_LS_EN_MASK |
                         PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
        } else {
                data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
                          PCIE_CNTL2__MST_MEM_LS_EN_MASK |
                          PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
        }

        if (def != data)
                WREG32_PCIE(smnPCIE_CNTL2, data);
}

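/*
 * Report the current BIF clockgating state by sampling one representative
 * enable bit per feature.
 */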
static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev,
                                            u32 *flags)
{
        int data;

        /* AMD_CG_SUPPORT_BIF_MGCG */
        data = RREG32_PCIE(smnCPM_CONTROL);
        if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_BIF_MGCG;

        /* AMD_CG_SUPPORT_BIF_LS */
        data = RREG32_PCIE(smnPCIE_CNTL2);
        if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_BIF_LS;
}

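/*
 * Register-offset getters consumed by common code: the GPU_HDP_FLUSH
 * REQ/DONE pair for ring-based HDP flushes, and the PCIE_INDEX2/PCIE_DATA2
 * pair used for indirect PCIE register access.
 */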
static u32 nbio_v6_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}

static u32 nbio_v6_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}

static u32 nbio_v6_1_get_pcie_index_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
{
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

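/*
 * Per-client ref/mask bits in GPU_HDP_FLUSH_REQ/DONE; a ring writes its
 * mask to REQ and then polls DONE to wait for the flush to complete.
 */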
static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
        .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
        .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
        .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
        .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
        .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
        .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
        .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
        .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
        .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
        .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
        .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
        .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};

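/*
 * Probe the virtualization configuration from RCC_IOV_FUNC_IDENTIFIER:
 * bit 0 means we are an SR-IOV VF, bit 31 means SR-IOV is enabled. A zero
 * register inside a VM is taken to mean passthrough mode.
 */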
static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
        uint32_t reg;

        reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
        if (reg & 1)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

        if (reg & 0x80000000)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

        if (!reg) {
                if (is_virtual_machine())       /* passthrough mode excludes sriov mode */
                        adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
        }
}

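/*
 * One-time register init: force the CI/SWUS max-read-request-size override
 * and disable slave ordering in PCIE_CI_CNTL, again only writing registers
 * whose value actually changes.
 */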
static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
        uint32_t def, data;

        def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
        data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
        data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

        if (def != data)
                WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);

        def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
        data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);

        if (def != data)
                WREG32_PCIE(smnPCIE_CI_CNTL, data);
}

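/* Callback table handed to the common NBIO layer. */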
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
        .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
        .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
        .get_rev_id = nbio_v6_1_get_rev_id,
        .mc_access_enable = nbio_v6_1_mc_access_enable,
        .hdp_flush = nbio_v6_1_hdp_flush,
        .get_memsize = nbio_v6_1_get_memsize,
        .sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
        .enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
        .enable_doorbell_selfring_aperture = nbio_v6_1_enable_doorbell_selfring_aperture,
        .ih_doorbell_range = nbio_v6_1_ih_doorbell_range,
        .update_medium_grain_clock_gating = nbio_v6_1_update_medium_grain_clock_gating,
        .update_medium_grain_light_sleep = nbio_v6_1_update_medium_grain_light_sleep,
        .get_clockgating_state = nbio_v6_1_get_clockgating_state,
        .ih_control = nbio_v6_1_ih_control,
        .init_registers = nbio_v6_1_init_registers,
        .detect_hw_virt = nbio_v6_1_detect_hw_virt,
};
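
/*
 * Usage sketch (illustrative, not part of this file): the SOC15 early-init
 * code selects an NBIO implementation and stores this table, roughly as
 *
 *      adev->nbio_funcs = &nbio_v6_1_funcs;
 *
 * after which common code only calls through it, e.g.
 * adev->nbio_funcs->hdp_flush(adev, ring). The exact field name varies by
 * kernel version (newer trees use adev->nbio.funcs).
 */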