#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "sdma0/sdma0_4_2_offset.h"
#include "sdma0/sdma0_4_2_sh_mask.h"
#include "sdma1/sdma1_4_2_offset.h"
#include "sdma1/sdma1_4_2_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "sdma0/sdma0_4_1_default.h"

#include "soc15_common.h"
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"

#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"

#include "amdgpu_ras.h"

MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");

#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L

/* helpers for per-instance access to the SDMA0/SDMA1 register files */
#define WREG32_SDMA(instance, offset, value) \
	WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
#define RREG32_SDMA(instance, offset) \
	RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static const struct soc15_reg_golden golden_settings_sdma_4[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};

static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};

static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
};

static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};

static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
};

static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RD_BURST_CNTL, 0x0000000f, 0x00000003),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
};

static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RD_BURST_CNTL, 0x0000000f, 0x00000003),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
};

static const struct soc15_reg_golden golden_settings_sdma_rv1[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
};

static const struct soc15_reg_golden golden_settings_sdma_rv2[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00003001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00003001)
};

static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
		u32 instance, u32 offset)
{
	return (instance == 0 ?
		(adev->reg_offset[SDMA0_HWIP][0][0] + offset) :
		(adev->reg_offset[SDMA1_HWIP][0][0] + offset));
}

static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (!amdgpu_virt_support_skip_setting(adev)) {
			soc15_program_register_sequence(adev,
					golden_settings_sdma_4,
					ARRAY_SIZE(golden_settings_sdma_4));
			soc15_program_register_sequence(adev,
					golden_settings_sdma_vg10,
					ARRAY_SIZE(golden_settings_sdma_vg10));
		}
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
				golden_settings_sdma_4,
				ARRAY_SIZE(golden_settings_sdma_4));
		soc15_program_register_sequence(adev,
				golden_settings_sdma_vg12,
				ARRAY_SIZE(golden_settings_sdma_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
				golden_settings_sdma0_4_2_init,
				ARRAY_SIZE(golden_settings_sdma0_4_2_init));
		soc15_program_register_sequence(adev,
				golden_settings_sdma0_4_2,
				ARRAY_SIZE(golden_settings_sdma0_4_2));
		soc15_program_register_sequence(adev,
				golden_settings_sdma1_4_2,
				ARRAY_SIZE(golden_settings_sdma1_4_2));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
				golden_settings_sdma_4_1,
				ARRAY_SIZE(golden_settings_sdma_4_1));
		if (adev->rev_id >= 8)
			soc15_program_register_sequence(adev,
					golden_settings_sdma_rv2,
					ARRAY_SIZE(golden_settings_sdma_rv2));
		else
			soc15_program_register_sequence(adev,
					golden_settings_sdma_rv1,
					ARRAY_SIZE(golden_settings_sdma_rv1));
		break;
	default:
		break;
	}
}
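
/**
 * sdma_v4_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */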
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
		DRM_DEBUG("psp_load == '%s'\n",
			  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}
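
/**
 * sdma_v4_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Returns the current hardware read pointer.
 */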
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 *rptr;

	/* the rptr is shadowed in the writeback buffer */
	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);

	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
	return ((*rptr) >> 2);
}
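
/**
 * sdma_v4_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Returns the current hardware write pointer.
 */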
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	if (ring->use_doorbell) {
		/* the wptr is shadowed in the writeback buffer */
		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
	} else {
		wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
		wptr = wptr << 32;
		wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
		DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
			  ring->me, wptr);
	}

	return wptr >> 2;
}
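
/**
 * sdma_v4_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware, either through the
 * doorbell or the WPTR registers.
 */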
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	DRM_DEBUG("Setting write pointer\n");
	if (ring->use_doorbell) {
		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];

		DRM_DEBUG("Using doorbell -- "
			  "wptr_offs == 0x%08x "
			  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
			  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
			  ring->wptr_offs,
			  lower_32_bits(ring->wptr << 2),
			  upper_32_bits(ring->wptr << 2));

		WRITE_ONCE(*wb, (ring->wptr << 2));
		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
			  ring->doorbell_index, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		DRM_DEBUG("Not using doorbell -- "
			  "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
			  "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
			  ring->me,
			  lower_32_bits(ring->wptr << 2),
			  ring->me,
			  upper_32_bits(ring->wptr << 2));
		WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
			    lower_32_bits(ring->wptr << 2));
		WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
			    upper_32_bits(ring->wptr << 2));
	}
}
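
/**
 * sdma_v4_0_page_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Returns the current hardware write pointer of the page queue.
 */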
static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	if (ring->use_doorbell) {
		/* the wptr is shadowed in the writeback buffer */
		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
	} else {
		wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
		wptr = wptr << 32;
		wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
	}

	return wptr >> 2;
}
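
/**
 * sdma_v4_0_page_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the page queue wptr back to the hardware.
 */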
static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];

		/* update the shadow wptr, then ring the doorbell */
		WRITE_ONCE(*wb, (ring->wptr << 2));
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		uint64_t wptr = ring->wptr << 2;

		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
			    lower_32_bits(wptr));
		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
			    upper_32_bits(wptr));
	}
}

static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}
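
/**
 * sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring.
 */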
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* IB packet must end on a 8 DW boundary */
	sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
				   int mem_space, int hdp,
				   uint32_t addr0, uint32_t addr1,
				   uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
	if (mem_space) {
		/* memory */
		amdgpu_ring_write(ring, addr0);
		amdgpu_ring_write(ring, addr1);
	} else {
		/* registers */
		amdgpu_ring_write(ring, addr0 << 2);
		amdgpu_ring_write(ring, addr1 << 2);
	}
	amdgpu_ring_write(ring, ref); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
}
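
/**
 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */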
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask = 0;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;

	if (ring->me == 0)
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
	else
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;

	sdma_v4_0_wait_reg_mem(ring, 0, 1,
			       adev->nbio_funcs->get_hdp_flush_done_offset(adev),
			       adev->nbio_funcs->get_hdp_flush_req_offset(adev),
			       ref_and_mask, ref_and_mask, 10);
}
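
/**
 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address where the fence seq number is written
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write the fence seq number
 * and a trap packet to generate an interrupt if needed.
 */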
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	/* zero in first two bits */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		/* zero in first two bits */
		BUG_ON(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
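
/**
 * sdma_v4_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers.
 */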
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
		ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
	}

	sdma0->sched.ready = false;
	sdma1->sched.ready = false;
}
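
/**
 * sdma_v4_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues.
 */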
static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}
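
/**
 * sdma_v4_0_page_stop - stop the page async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the page async dma ring buffers.
 */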
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
					RB_ENABLE, 0);
		WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
		ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
					IB_ENABLE, 0);
		WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
	}

	sdma0->sched.ready = false;
	sdma1->sched.ready = false;
}
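
/**
 * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch.
 */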
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
			"clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
				AUTO_CTXSW_ENABLE, enable ? 1 : 0);
		if (enable && amdgpu_sdma_phase_quantum) {
			WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
			WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
			WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
		}
		WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
	}
}
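
/**
 * sdma_v4_0_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines.
 */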
static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v4_0_gfx_stop(adev);
		sdma_v4_0_rlc_stop(adev);
		if (adev->sdma.has_page_queue)
			sdma_v4_0_page_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
		WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
	}
}
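
/**
 * sdma_v4_0_rb_cntl - get parameters for rb_cntl
 */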
static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{
	/* Set ring buffer size in dwords */
	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);

	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
				RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
	return rb_cntl;
}
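
/**
 * sdma_v4_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the gfx DMA ring buffers and enable them.
 */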
static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{
	struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
	u32 wb_offset;
	u32 doorbell;
	u32 doorbell_offset;
	u64 wptr_gpu_addr;

	wb_offset = (ring->rptr_offs * 4);

	rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);

	/* set the wb address whether it's enabled or not */
	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
				RPTR_WRITEBACK_ENABLE, 1);

	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);

	ring->wptr = 0;

	/* before programing wptr to a less value, need set minor_ptr_update first */
	WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);

	doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
	doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);

	doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
				 ring->use_doorbell);
	doorbell_offset = REG_SET_FIELD(doorbell_offset,
					SDMA0_GFX_DOORBELL_OFFSET,
					OFFSET, ring->doorbell_index);
	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
	WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);

	sdma_v4_0_ring_set_wptr(ring);

	/* set minor_ptr_update to 0 after wptr programed */
	WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);

	/* setup the wptr shadow polling */
	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
		    lower_32_bits(wptr_gpu_addr));
	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
		    upper_32_bits(wptr_gpu_addr));
	wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
				       SDMA0_GFX_RB_WPTR_POLL_CNTL,
				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);

	/* enable DMA RB */
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);

	ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
	/* enable DMA IBs */
	WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);

	ring->sched.ready = true;
}
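
/**
 * sdma_v4_0_page_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the page DMA ring buffers and enable them.
 */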
static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
{
	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
	u32 wb_offset;
	u32 doorbell;
	u32 doorbell_offset;
	u64 wptr_gpu_addr;

	wb_offset = (ring->rptr_offs * 4);

	rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);

	/* set the wb address whether it's enabled or not */
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
				RPTR_WRITEBACK_ENABLE, 1);

	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);

	ring->wptr = 0;

	/* before programing wptr to a less value, need set minor_ptr_update first */
	WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);

	doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
	doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);

	doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
				 ring->use_doorbell);
	doorbell_offset = REG_SET_FIELD(doorbell_offset,
					SDMA0_PAGE_DOORBELL_OFFSET,
					OFFSET, ring->doorbell_index);
	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
	WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);

	sdma_v4_0_page_ring_set_wptr(ring);

	/* set minor_ptr_update to 0 after wptr programed */
	WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);

	/* setup the wptr shadow polling */
	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
		    lower_32_bits(wptr_gpu_addr));
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
		    upper_32_bits(wptr_gpu_addr));
	wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
				       SDMA0_PAGE_RB_WPTR_POLL_CNTL,
				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);

	/* enable DMA RB */
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
	WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);

	ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
	/* enable DMA IBs */
	WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);

	ring->sched.ready = true;
}

static void
sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
		/* enable idle interrupt */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
		data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;

		if (data != def)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
	} else {
		/* disable idle interrupt */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
		data &= ~SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
		if (data != def)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
	}
}

static void sdma_v4_1_init_power_gating(struct amdgpu_device *adev)
{
	uint32_t def, data;

	/* Enable HW based PG. */
	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	data |= SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK;
	if (data != def)
		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

	/* enable interrupt */
	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
	data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
	if (data != def)
		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);

	/* Configure hold time and status duration from the defaults */
	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	data &= ~SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK;
	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK);

	data &= ~SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK;
	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK);
	if (data != def)
		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
}

static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
{
	if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
		return;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		sdma_v4_1_init_power_gating(adev);
		sdma_v4_1_update_power_gating(adev, true);
		break;
	default:
		break;
	}
}
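
/**
 * sdma_v4_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them.
 * Returns 0 for success, error for failure.
 */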
static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
{
	sdma_v4_0_init_pg(adev);

	return 0;
}
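
/**
 * sdma_v4_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */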
static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v4_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;

		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);

		for (j = 0; j < fw_size; j++)
			WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
				    le32_to_cpup(fw_data++));

		WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
			    adev->sdma.instance[i].fw_version);
	}

	return 0;
}
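
/**
 * sdma_v4_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them.
 * Returns 0 for success, error for failure.
 */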
static int sdma_v4_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, r = 0;

	if (amdgpu_sriov_vf(adev)) {
		sdma_v4_0_ctx_switch_enable(adev, false);
		sdma_v4_0_enable(adev, false);
	} else {
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			r = sdma_v4_0_load_microcode(adev);
			if (r)
				return r;
		}

		/* unhalt the MEs */
		sdma_v4_0_enable(adev, true);
		/* enable sdma ring preemption */
		sdma_v4_0_ctx_switch_enable(adev, true);
	}

	/* start the gfx rings and rlc compute queues */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		uint32_t temp;

		WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
		sdma_v4_0_gfx_resume(adev, i);
		if (adev->sdma.has_page_queue)
			sdma_v4_0_page_resume(adev, i);

		/* set utc l1 enable flag always to 1 */
		temp = RREG32_SDMA(i, mmSDMA0_CNTL);
		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
		WREG32_SDMA(i, mmSDMA0_CNTL, temp);

		if (!amdgpu_sriov_vf(adev)) {
			/* unhalt engine */
			temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
			WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
		}
	}

	if (amdgpu_sriov_vf(adev)) {
		sdma_v4_0_ctx_switch_enable(adev, true);
		sdma_v4_0_enable(adev, true);
	} else {
		r = sdma_v4_0_rlc_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->sdma.has_page_queue) {
			struct amdgpu_ring *page = &adev->sdma.instance[i].page;

			r = amdgpu_ring_test_helper(page);
			if (r)
				return r;

			if (adev->mman.buffer_funcs_ring == page)
				amdgpu_ttm_set_buffer_funcs_status(adev, true);
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return r;
}
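
/**
 * sdma_v4_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory.
 * Returns 0 for success, error for failure.
 */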
static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}
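
/**
 * sdma_v4_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring.
 * Returns 0 on success, error on failure.
 */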
static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}
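
/**
 * sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to copy to
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA.
 */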
static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
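
/**
 * sdma_v4_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA.
 */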
static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}
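
/**
 * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA.
 */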
static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}
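
/**
 * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 */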
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
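
/**
 * sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed before
 * continuing.
 */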
static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	sdma_v4_0_wait_reg_mem(ring, 1, 0,
			       addr & 0xfffffffc,
			       upper_32_bits(addr) & 0xffffffff,
			       seq, 0xffffffff, 4);
}
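
/**
 * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA.
 */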
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}

static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
{
	uint fw_version = adev->sdma.instance[0].fw_version;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return fw_version >= 430;
	case CHIP_VEGA12:
		return false;
	case CHIP_VEGA20:
		return fw_version >= 123;
	default:
		return false;
	}
}
1470
1471static int sdma_v4_0_early_init(void *handle)
1472{
1473 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1474 int r;
1475
1476 if (adev->asic_type == CHIP_RAVEN)
1477 adev->sdma.num_instances = 1;
1478 else
1479 adev->sdma.num_instances = 2;
1480
1481 r = sdma_v4_0_init_microcode(adev);
1482 if (r) {
1483 DRM_ERROR("Failed to load sdma firmware!\n");
1484 return r;
1485 }
1486
1487
1488 if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
1489 adev->sdma.has_page_queue = false;
1490 else if (sdma_v4_0_fw_support_paging_queue(adev))
1491 adev->sdma.has_page_queue = true;
1492
1493 sdma_v4_0_set_ring_funcs(adev);
1494 sdma_v4_0_set_buffer_funcs(adev);
1495 sdma_v4_0_set_vm_pte_funcs(adev);
1496 sdma_v4_0_set_irq_funcs(adev);
1497
1498 return 0;
1499}

static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct amdgpu_iv_entry *entry);

static int sdma_v4_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_common_if **ras_if = &adev->sdma.ras_if;
	struct ras_ih_if ih_info = {
		.cb = sdma_v4_0_process_ras_data_cb,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "sdma_err_count",
		.debugfs_name = "sdma_err_inject",
	};
	struct ras_common_if ras_block = {
		.block = AMDGPU_RAS_BLOCK__SDMA,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.sub_block_index = 0,
		.name = "sdma",
	};
	int r;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
		amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
		return 0;
	}

	/* handle resume path */
	if (*ras_if) {
		/* resend the ras TA enable cmd during resume;
		 * be prepared to handle failure.
		 */
		ih_info.head = **ras_if;
		r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
		if (r) {
			if (r == -EAGAIN) {
				/* request a gpu reset; will run again */
				amdgpu_ras_request_reset_on_boot(adev,
						AMDGPU_RAS_BLOCK__SDMA);
				return 0;
			}
			/* failed to enable ras, clean up all interfaces */
			goto irq;
		}
		/* enabled successfully, continue */
		goto resume;
	}

	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
	if (!*ras_if)
		return -ENOMEM;

	**ras_if = ras_block;

	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
	if (r) {
		if (r == -EAGAIN) {
			amdgpu_ras_request_reset_on_boot(adev,
					AMDGPU_RAS_BLOCK__SDMA);
			r = 0;
		}
		goto feature;
	}

	ih_info.head = **ras_if;
	fs_info.head = **ras_if;

	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
	if (r)
		goto interrupt;

	amdgpu_ras_debugfs_create(adev, &fs_info);

	r = amdgpu_ras_sysfs_create(adev, &fs_info);
	if (r)
		goto sysfs;
resume:
	r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
	if (r)
		goto irq;

	r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
	if (r) {
		amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
		goto irq;
	}

	return 0;
irq:
	amdgpu_ras_sysfs_remove(adev, *ras_if);
sysfs:
	amdgpu_ras_debugfs_remove(adev, *ras_if);
	amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, *ras_if, 0);
feature:
	kfree(*ras_if);
	*ras_if = NULL;
	return r;
}

static int sdma_v4_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA SRAM ECC event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
			      &adev->sdma.ecc_irq);
	if (r)
		return r;

	/* SDMA SRAM ECC event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_SRAM_ECC,
			      &adev->sdma.ecc_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;

		DRM_INFO("use_doorbell being set to: [%s]\n",
			 ring->use_doorbell ? "true" : "false");

		/* doorbell size is 2 dwords, get DWORD offset */
		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1);
		if (r)
			return r;

		if (adev->sdma.has_page_queue) {
			ring = &adev->sdma.instance[i].page;
			ring->ring_obj = NULL;
			ring->use_doorbell = true;

			/* paging queue uses the same doorbell index/routing as
			 * the gfx queue, with a 0x400 offset
			 */
			ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
			ring->doorbell_index += 0x400;

			sprintf(ring->name, "page%d", i);
			r = amdgpu_ring_init(adev, ring, 1024,
					     &adev->sdma.trap_irq,
					     (i == 0) ?
					     AMDGPU_SDMA_IRQ_INSTANCE0 :
					     AMDGPU_SDMA_IRQ_INSTANCE1);
			if (r)
				return r;
		}
	}

	return r;
}

static int sdma_v4_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
	    adev->sdma.ras_if) {
		struct ras_common_if *ras_if = adev->sdma.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove debugfs and sysfs entries first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* then remove the IH and disable the feature */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
		if (adev->sdma.has_page_queue)
			amdgpu_ring_fini(&adev->sdma.instance[i].page);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}

	return 0;
}

static int sdma_v4_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->set_powergating_by_smu)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);

	sdma_v4_0_init_golden_registers(adev);

	r = sdma_v4_0_start(adev);

	return r;
}

static int sdma_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
	amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);

	sdma_v4_0_ctx_switch_enable(adev, false);
	sdma_v4_0_enable(adev, false);

	if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs
	    && adev->powerplay.pp_funcs->set_powergating_by_smu)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);

	return 0;
}

static int sdma_v4_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v4_0_hw_fini(adev);
}

static int sdma_v4_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v4_0_hw_init(adev);
}

static bool sdma_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);

		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			return false;
	}

	return true;
}

static int sdma_v4_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 sdma0, sdma1;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG);
		sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG);

		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v4_0_soft_reset(void *handle)
{
	/* todo */

	return 0;
}

static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
		       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
	WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);

	return 0;
}

static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t instance;

	DRM_DEBUG("IH: SDMA trap\n");
	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		instance = 0;
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		instance = 1;
		break;
	default:
		return 0;
	}

	switch (entry->ring_id) {
	case 0:
		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
		break;
	case 1:
		if (adev->asic_type == CHIP_VEGA20)
			amdgpu_fence_process(&adev->sdma.instance[instance].page);
		break;
	case 2:
		/* XXX compute */
		break;
	case 3:
		if (adev->asic_type != CHIP_VEGA20)
			amdgpu_fence_process(&adev->sdma.instance[instance].page);
		break;
	}
	return 0;
}

static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct amdgpu_iv_entry *entry)
{
	uint32_t instance, err_source;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		instance = 0;
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		instance = 1;
		break;
	default:
		return 0;
	}

	switch (entry->src_id) {
	case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
		err_source = 0;
		break;
	case SDMA0_4_0__SRCID__SDMA_ECC:
		err_source = 1;
		break;
	default:
		return 0;
	}

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);

	amdgpu_ras_reset_gpu(adev, 0);

	return AMDGPU_RAS_UE;
}

static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->sdma.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	int instance;

	DRM_ERROR("Illegal instruction in SDMA command stream\n");

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		instance = 0;
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		instance = 1;
		break;
	default:
		return 0;
	}

	switch (entry->ring_id) {
	case 0:
		drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
		break;
	}
	return 0;
}

static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 sdma_edc_config;

	u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
		sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_EDC_CONFIG) :
		sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_EDC_CONFIG);

	sdma_edc_config = RREG32(reg_offset);
	sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
		       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
	WREG32(reg_offset, sdma_edc_config);

	return 0;
}
1950
static void sdma_v4_0_update_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data, def;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		/* enable sdma0 clock gating */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
		data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);

		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
			data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
		}
	} else {
		/* disable sdma0 clock gating */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
		data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);

		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
			data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
		}
	}
}

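/*
 * Medium grain light sleep: MEM_POWER_OVERRIDE set allows the SDMA
 * memories to enter light sleep, cleared keeps them out of it.
 */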
static void sdma_v4_0_update_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data, def;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		/* 1-not override: enable sdma0 mem light sleep */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
		data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

		/* 1-not override: enable sdma1 mem light sleep */
		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
			data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
		}
	} else {
		/* 0-override: disable sdma0 mem light sleep */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
		data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

		/* 0-override: disable sdma1 mem light sleep */
		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
			data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
		}
	}
}

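/* CG registers are not accessible to a virtual function, hence the
 * early out under SR-IOV.
 */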
static int sdma_v4_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		sdma_v4_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v4_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

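/* SDMA powergating is only implemented for Raven (SDMA 4.1). */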
static int sdma_v4_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		sdma_v4_1_update_power_gating(adev,
				state == AMD_PG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

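/* Report the current CG state by sampling the instance 0 registers. */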
static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
	.name = "sdma_v4_0",
	.early_init = sdma_v4_0_early_init,
	.late_init = sdma_v4_0_late_init,
	.sw_init = sdma_v4_0_sw_init,
	.sw_fini = sdma_v4_0_sw_fini,
	.hw_init = sdma_v4_0_hw_init,
	.hw_fini = sdma_v4_0_hw_fini,
	.suspend = sdma_v4_0_suspend,
	.resume = sdma_v4_0_resume,
	.is_idle = sdma_v4_0_is_idle,
	.wait_for_idle = sdma_v4_0_wait_for_idle,
	.soft_reset = sdma_v4_0_soft_reset,
	.set_clockgating_state = sdma_v4_0_set_clockgating_state,
	.set_powergating_state = sdma_v4_0_set_powergating_state,
	.get_clockgating_state = sdma_v4_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = sdma_v4_0_ring_get_rptr,
	.get_wptr = sdma_v4_0_ring_get_wptr,
	.set_wptr = sdma_v4_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v4_0_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
		/* sdma_v4_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
	.emit_ib = sdma_v4_0_ring_emit_ib,
	.emit_fence = sdma_v4_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
	.test_ring = sdma_v4_0_ring_test_ring,
	.test_ib = sdma_v4_0_ring_test_ib,
	.insert_nop = sdma_v4_0_ring_insert_nop,
	.pad_ib = sdma_v4_0_ring_pad_ib,
	.emit_wreg = sdma_v4_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = sdma_v4_0_ring_get_rptr,
	.get_wptr = sdma_v4_0_page_ring_get_wptr,
	.set_wptr = sdma_v4_0_page_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v4_0_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
		/* sdma_v4_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
	.emit_ib = sdma_v4_0_ring_emit_ib,
	.emit_fence = sdma_v4_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
	.test_ring = sdma_v4_0_ring_test_ring,
	.test_ib = sdma_v4_0_ring_test_ib,
	.insert_nop = sdma_v4_0_ring_insert_nop,
	.pad_ib = sdma_v4_0_ring_pad_ib,
	.emit_wreg = sdma_v4_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
		if (adev->sdma.has_page_queue) {
			adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
			adev->sdma.instance[i].page.me = i;
		}
	}
}

static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
	.set = sdma_v4_0_set_trap_irq_state,
	.process = sdma_v4_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
	.process = sdma_v4_0_process_illegal_inst_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
	.set = sdma_v4_0_set_ecc_irq_state,
	.process = sdma_v4_0_process_ecc_irq,
};

static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
	adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
}

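/**
 * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the sDMA engine; used by the amdgpu ttm
 * implementation to move pages when registered as the asic copy callback.
 */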
static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

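/**
 * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the sDMA engine.
 */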
static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

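/* Copy and fill are limited to 4 MB (0x400000 bytes) per packet; the
 * dword counts match the packet sizes emitted above (7 for copy, 5 for
 * fill).
 */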
static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v4_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v4_0_emit_fill_buffer,
};

static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
	/* prefer the page queue of the second instance for buffer moves */
	if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1)
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[1].page;
	else
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v4_0_vm_copy_pte,

	.write_pte = sdma_v4_0_vm_write_pte,
	.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
};

static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	struct drm_gpu_scheduler *sched;
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
	if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1) {
		/* run PTE updates on the page queues of instances 1..N-1 and
		 * reserve instance 0's page queue for page fault handling
		 */
		for (i = 1; i < adev->sdma.num_instances; i++) {
			sched = &adev->sdma.instance[i].page.sched;
			adev->vm_manager.vm_pte_rqs[i - 1] =
				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
		}
		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
		adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			sched = &adev->sdma.instance[i].ring.sched;
			adev->vm_manager.vm_pte_rqs[i] =
				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
		}
		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v4_0_ip_funcs,
};