/* linux/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c */
   1/*
   2 * Copyright 2020 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "amdgpu.h"
  24#include "amdgpu_atombios.h"
  25#include "hdp_v5_0.h"
  26
  27#include "hdp/hdp_5_0_0_offset.h"
  28#include "hdp/hdp_5_0_0_sh_mask.h"
  29#include <uapi/linux/kfd_ioctl.h>
  30
  31static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
  32                                struct amdgpu_ring *ring)
  33{
  34        if (!ring || !ring->funcs->emit_wreg)
  35                WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
  36        else
  37                amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
  38}
  39
  40static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
  41                                    struct amdgpu_ring *ring)
  42{
  43        if (!ring || !ring->funcs->emit_wreg) {
  44                WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
  45        } else {
  46                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
  47                                        HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
  48        }
  49}
  50
/* Enable or disable SRAM power gating (LS/DS/SD) for the HDP IPH and RC
 * memories.  The register write sequence is order-sensitive: clocks are
 * forced on first, all gating is disabled, the single requested mode is
 * programmed, and finally the clock overrides are restored.
 * No-op unless at least one of the HDP LS/DS/SD cg_flags is set.
 */
static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
                                          bool enable)
{
        uint32_t hdp_clk_cntl, hdp_clk_cntl1;
        uint32_t hdp_mem_pwr_cntl;

        if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_HDP_DS |
                                AMD_CG_SUPPORT_HDP_SD)))
                return;

        /* hdp_clk_cntl1 keeps the untouched value for restoration at the end */
        hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
        hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

        /* Before doing clock/power mode switch,
         * forced on IPH & RC clock */
        hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
        hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                     RC_MEM_CLK_SOFT_OVERRIDE, 1);
        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

        /* HDP 5.0 doesn't support dynamic power mode switch,
         * disable clock and power gating before any changing */
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_CTRL_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_LS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_DS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_SD_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_CTRL_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_LS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_DS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_SD_EN, 0);
        WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

        /* only one clock gating mode (LS/DS/SD) can be enabled */
        if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_LS_EN, enable);
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_LS_EN, enable);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_DS_EN, enable);
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_DS_EN, enable);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_SD_EN, enable);
                /* RC should not use shut down mode, fallback to ds */
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_DS_EN, enable);
        }

        /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
         * be set for SRAM LS/DS/SD */
        if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
                              AMD_CG_SUPPORT_HDP_SD)) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_CTRL_EN, 1);
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_CTRL_EN, 1);
        }

        WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

        /* restore IPH & RC clock override after clock/power mode changing */
        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
 133
 134static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 135                                                      bool enable)
 136{
 137        uint32_t hdp_clk_cntl;
 138
 139        if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
 140                return;
 141
 142        hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
 143
 144        if (enable) {
 145                hdp_clk_cntl &=
 146                        ~(uint32_t)
 147                        (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
 148                         HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
 149                         HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
 150                         HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
 151                         HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
 152                         HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
 153        } else {
 154                hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
 155                        HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
 156                        HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
 157                        HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
 158                        HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
 159                        HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
 160        }
 161
 162        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
 163}
 164
 165static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
 166                                              bool enable)
 167{
 168        hdp_v5_0_update_mem_power_gating(adev, enable);
 169        hdp_v5_0_update_medium_grain_clock_gating(adev, enable);
 170}
 171
 172static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
 173                                            u32 *flags)
 174{
 175        uint32_t tmp;
 176
 177        /* AMD_CG_SUPPORT_HDP_MGCG */
 178        tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
 179        if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
 180                     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
 181                     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
 182                     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
 183                     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
 184                     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
 185                *flags |= AMD_CG_SUPPORT_HDP_MGCG;
 186
 187        /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
 188        tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
 189        if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
 190                *flags |= AMD_CG_SUPPORT_HDP_LS;
 191        else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
 192                *flags |= AMD_CG_SUPPORT_HDP_DS;
 193        else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
 194                *flags |= AMD_CG_SUPPORT_HDP_SD;
 195}
 196
 197static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
 198{
 199        u32 tmp;
 200
 201        tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
 202        tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
 203        WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
 204}
 205
/* HDP callback table for HDP 5.0 hardware, consumed via adev->hdp.funcs */
const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
        .flush_hdp = hdp_v5_0_flush_hdp,
        .invalidate_hdp = hdp_v5_0_invalidate_hdp,
        .update_clock_gating = hdp_v5_0_update_clock_gating,
        .get_clock_gating_state = hdp_v5_0_get_clockgating_state,
        .init_registers = hdp_v5_0_init_registers,
};
 213