#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_2.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#include "asic_reg/gc/gc_9_0_default.h"

#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");

MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");

#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_2_ARCT 0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_3_ARCT 0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_4_ARCT 0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0

enum ta_ras_gfx_subblock {
	/* CPC */
	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
	TA_RAS_BLOCK__GFX_CPC_UCODE,
	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF */
	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	TA_RAS_BLOCK__GFX_CPF_TAG,
	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG */
	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
	TA_RAS_BLOCK__GFX_CPG_TAG,
	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS */
	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI */
	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ */
	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_LDS_D,
	TA_RAS_BLOCK__GFX_SQ_LDS_I,
	TA_RAS_BLOCK__GFX_SQ_VGPR,
	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 ranges) */
	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0 */
	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1 */
	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2 */
	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA */
	TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA */
	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges) */
	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0 */
	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1 */
	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2 */
	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3 */
	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4 */
	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI */
	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP */
	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD */
	TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges) */
	TA_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0 */
	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1 */
	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2 */
	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank */
	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker */
	TA_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
	unsigned char *name;
	int ta_subblock;
	int hw_supported_error_type;
	int sw_supported_error_type;
};

#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \
	[AMDGPU_RAS_BLOCK__##subblock] = { \
		#subblock, \
		TA_RAS_BLOCK__##subblock, \
		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \
		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \
	}

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

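/*
 * Offsets of the eight RLC save/restore-machine index control registers,
 * expressed relative to the first register of each bank so the registers
 * can be addressed uniformly in a loop.
 */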
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

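/*
 * Indirect register write through the RLC, used when an SR-IOV guest with
 * full access must program RLC-protected registers.  The value and target
 * offset are handed to the RLC firmware through SCRATCH_REG0/1 (bit 31 of
 * SCRATCH_REG1 acts as a busy flag), RLC_SPARE_INT kicks the firmware, and
 * the caller polls until the busy flag clears.  GRBM_GFX_CNTL and
 * GRBM_GFX_INDEX are special-cased: they are mirrored into SCRATCH_REG2/3
 * and written through MMIO directly.
 */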
static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
	static void *scratch_reg0;
	static void *scratch_reg1;
	static void *scratch_reg2;
	static void *scratch_reg3;
	static void *spare_int;
	static uint32_t grbm_cntl;
	static uint32_t grbm_idx;

	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
	scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
	scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;

	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;

	if (amdgpu_sriov_runtime(adev)) {
		pr_err("shouldn't call rlcg write register during runtime\n");
		return;
	}

	if (offset == grbm_cntl || offset == grbm_idx) {
		if (offset == grbm_cntl)
			writel(v, scratch_reg2);
		else if (offset == grbm_idx)
			writel(v, scratch_reg3);

		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		uint32_t i = 0;
		uint32_t retries = 50000;

		writel(v, scratch_reg0);
		writel(offset | 0x80000000, scratch_reg1);
		writel(1, spare_int);
		for (i = 0; i < retries; i++) {
			u32 tmp;

			tmp = readl(scratch_reg1);
			if (!(tmp & 0x80000000))
				break;

			udelay(10);
		}
		if (i >= retries)
			pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
	}

}

static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
				u32 v, u32 acc_flags, u32 hwip)
{
	if ((acc_flags & AMDGPU_REGS_RLC) &&
	    amdgpu_sriov_fullaccess(adev)) {
		gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);

		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, v);
	else
		WREG32(offset, v);
}

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);

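/*
 * PM4 packet helpers for the KIQ (Kernel Interface Queue), the privileged
 * compute queue the driver uses to hand queue-management requests
 * (map/unmap, status queries, TLB invalidation) to the CP firmware.
 */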
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
		PACKET3_SET_RESOURCES_VMID_MASK(0) |
		/* vmid_mask:0 queue_type:0 (KIQ) */
		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
		lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
		upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring,
			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) |
			 PACKET3_MAP_QUEUES_VMID(0) |
			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			 /* queue_type: normal compute queue */
			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			 /* alloc format: all_on_one_pipe */
			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			 /* num_queues: must be 1 */
			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
	/* packet sizes below are total dwords (PM4 header plus payload) */
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
}

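/*
 * "Golden" settings are per-ASIC register overrides expressed as
 * (mask, value) pairs; soc15_program_register_sequence() applies each one
 * as a read-modify-write over the hardware reset defaults.
 */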
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_ARCTURUS:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_4_1_arct,
						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	case CHIP_RENOIR:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1_rn,
						ARRAY_SIZE(golden_settings_gc_9_1_rn));
		return; /* for renoir, don't need common goldensetting */
	case CHIP_ALDEBARAN:
		gfx_v9_4_2_init_golden_registers(adev,
						 adev->smuio.funcs->get_die_id(adev));
		break;
	default:
		break;
	}

	if ((adev->asic_type != CHIP_ARCTURUS) &&
	    (adev->asic_type != CHIP_ALDEBARAN))
		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

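/*
 * Emit a WRITE_DATA packet that programs a single register from the ring;
 * 'wc' requests write confirmation before the packet retires.
 */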
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
				 /* memory (1) or register (0) */
				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
				 WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

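/*
 * Basic ring test: seed a scratch register with 0xCAFEDEAD, ask the ring to
 * write 0xDEADBEEF to it via SET_UCONFIG_REG, then poll until the value
 * lands or the timeout expires.
 */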
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto error_free_scratch;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_scratch:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

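/*
 * IB test: like the ring test, but the write goes through an indirect
 * buffer and lands in a write-back slot in system memory, proving that IB
 * fetch and execution work end to end.
 */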
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}


static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

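/*
 * me/mec_fw_write_wait flag whether the CP firmware supports the combined
 * write-then-wait register operation; the minimum firmware and feature
 * versions checked below are per ASIC.
 */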
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.me_fw_write_wait = false;
	adev->gfx.mec_fw_write_wait = false;

	if ((adev->asic_type != CHIP_ARCTURUS) &&
	    ((adev->gfx.mec_fw_version < 0x000001a5) ||
	    (adev->gfx.mec_feature_version < 46) ||
	    (adev->gfx.pfp_fw_version < 0x000000b7) ||
	    (adev->gfx.pfp_feature_version < 46)))
		DRM_WARN_ONCE("CP firmware version too old, please update!");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000193) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA12:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000196) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA20:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000197) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_RAVEN:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000192) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	default:
		adev->gfx.me_fw_write_wait = true;
		adev->gfx.mec_fw_write_wait = true;
		break;
	}
}

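/*
 * Boards on which GFXOFF is known to be broken are matched by the full
 * PCI vendor/device/subsystem/revision tuple below.
 */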
struct amdgpu_gfxoff_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
	/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
	{ 0, 0, 0, 0, 0 },
};

static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
{
	const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static bool is_raven_kicker(struct amdgpu_device *adev)
{
	if (adev->pm.fw_version >= 0x41e2b)
		return true;
	else
		return false;
}

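/*
 * Renoir with new enough ME firmware (version >= 0xa5, feature >= 52)
 * supports an enlarged doorbell range; callers use this check to decide
 * how much doorbell space to program.
 */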
static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
{
	if ((adev->asic_type == CHIP_RENOIR) &&
	    (adev->gfx.me_fw_version >= 0x000000a5) &&
	    (adev->gfx.me_feature_version >= 52))
		return true;
	else
		return false;
}

static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		break;
	case CHIP_RAVEN:
		if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
		      (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
		    ((!is_raven_kicker(adev) &&
		      adev->gfx.rlc_fw_version < 531) ||
		     (adev->gfx.rlc_feature_version < 1) ||
		     !adev->gfx.rlc.is_rlc_v2_1))
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	case CHIP_RENOIR:
		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	default:
		break;
	}
}

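/*
 * Fetch and validate the PFP, ME and CE microcode images for the given chip
 * and, when the PSP front-door loading path is used, register them in the
 * global firmware list so the PSP can load them.
 */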
static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
					  const char *chip_name)
{
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
	}
	return err;
}
1422
1423static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1424 const char *chip_name)
1425{
1426 char fw_name[30];
1427 int err;
1428 struct amdgpu_firmware_info *info = NULL;
1429 const struct common_firmware_header *header = NULL;
1430 const struct rlc_firmware_header_v2_0 *rlc_hdr;
1431 unsigned int *tmp = NULL;
1432 unsigned int i = 0;
1433 uint16_t version_major;
1434 uint16_t version_minor;
1435 uint32_t smu_version;

	/*
	 * For Picasso on an AM4 socket board we use picasso_rlc_am4.bin
	 * instead of picasso_rlc.bin.
	 *
	 * Judgment method:
	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
	 *       or revision >= 0xD8 && revision <= 0xDF
	 * otherwise it is PCO FP5
	 */
	if (!strcmp(chip_name, "picasso") &&
		(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
		((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
		(smu_version >= 0x41e2b))
		/*
		 * SMC is loaded by SBIOS on APU and it's able to get the SMU
		 * version directly.
		 */
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1462
1463 version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1464 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1465 if (version_major == 2 && version_minor == 1)
1466 adev->gfx.rlc.is_rlc_v2_1 = true;
1467
1468 adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1469 adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1470 adev->gfx.rlc.save_and_restore_offset =
1471 le32_to_cpu(rlc_hdr->save_and_restore_offset);
1472 adev->gfx.rlc.clear_state_descriptor_offset =
1473 le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1474 adev->gfx.rlc.avail_scratch_ram_locations =
1475 le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1476 adev->gfx.rlc.reg_restore_list_size =
1477 le32_to_cpu(rlc_hdr->reg_restore_list_size);
1478 adev->gfx.rlc.reg_list_format_start =
1479 le32_to_cpu(rlc_hdr->reg_list_format_start);
1480 adev->gfx.rlc.reg_list_format_separate_start =
1481 le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1482 adev->gfx.rlc.starting_offsets_start =
1483 le32_to_cpu(rlc_hdr->starting_offsets_start);
1484 adev->gfx.rlc.reg_list_format_size_bytes =
1485 le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1486 adev->gfx.rlc.reg_list_size_bytes =
1487 le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1488 adev->gfx.rlc.register_list_format =
1489 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1490 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1491 if (!adev->gfx.rlc.register_list_format) {
1492 err = -ENOMEM;
1493 goto out;
1494 }
1495
1496 tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1497 le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1498 for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1499 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1500
1501 adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1502
1503 tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1504 le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1505 for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1506 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1507
1508 if (adev->gfx.rlc.is_rlc_v2_1)
1509 gfx_v9_0_init_rlc_ext_microcode(adev);
1510
1511 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1512 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1513 info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1514 info->fw = adev->gfx.rlc_fw;
1515 header = (const struct common_firmware_header *)info->fw->data;
1516 adev->firmware.fw_size +=
1517 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1518
1519 if (adev->gfx.rlc.is_rlc_v2_1 &&
1520 adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1521 adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1522 adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1523 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1524 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1525 info->fw = adev->gfx.rlc_fw;
1526 adev->firmware.fw_size +=
1527 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1528
1529 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1530 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1531 info->fw = adev->gfx.rlc_fw;
1532 adev->firmware.fw_size +=
1533 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1534
1535 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1536 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1537 info->fw = adev->gfx.rlc_fw;
1538 adev->firmware.fw_size +=
1539 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1540 }
1541 }
1542
1543out:
1544 if (err) {
1545 dev_err(adev->dev,
1546 "gfx9: Failed to load firmware \"%s\"\n",
1547 fw_name);
1548 release_firmware(adev->gfx.rlc_fw);
1549 adev->gfx.rlc_fw = NULL;
1550 }
1551 return err;
1552}
1553
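/*
 * Aldebaran, Arcturus and Renoir do not ship a separate mec2 firmware
 * binary; on those ASICs MEC2 runs the MEC1 image, so the caller simply
 * mirrors the mec firmware version info instead of loading a second file.
 */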
1554static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
1555{
1556 if (adev->asic_type == CHIP_ALDEBARAN ||
1557 adev->asic_type == CHIP_ARCTURUS ||
1558 adev->asic_type == CHIP_RENOIR)
1559 return false;
1560
1561 return true;
1562}
1563
1564static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1565 const char *chip_name)
1566{
1567 char fw_name[30];
1568 int err;
1569 struct amdgpu_firmware_info *info = NULL;
1570 const struct common_firmware_header *header = NULL;
1571 const struct gfx_firmware_header_v1_0 *cp_hdr;
1572
1573 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1574 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1575 if (err)
1576 goto out;
1577 err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1578 if (err)
1579 goto out;
1580 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1581 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1582 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1583
	/* mec2 is shipped as a separate binary only on some ASICs */
	if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1586 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1587 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1588 if (!err) {
1589 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1590 if (err)
1591 goto out;
1592 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1593 adev->gfx.mec2_fw->data;
1594 adev->gfx.mec2_fw_version =
1595 le32_to_cpu(cp_hdr->header.ucode_version);
1596 adev->gfx.mec2_feature_version =
1597 le32_to_cpu(cp_hdr->ucode_feature_version);
1598 } else {
1599 err = 0;
1600 adev->gfx.mec2_fw = NULL;
1601 }
	} else {
		/* no separate mec2 image on this ASIC; MEC2 runs the MEC1
		 * firmware, so mirror its version info */
		adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
		adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
	}
1606
1607 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1608 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1609 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1610 info->fw = adev->gfx.mec_fw;
1611 header = (const struct common_firmware_header *)info->fw->data;
1612 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1613 adev->firmware.fw_size +=
1614 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1615
1616 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1617 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1618 info->fw = adev->gfx.mec_fw;
1619 adev->firmware.fw_size +=
1620 ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1621
1622 if (adev->gfx.mec2_fw) {
1623 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1624 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1625 info->fw = adev->gfx.mec2_fw;
1626 header = (const struct common_firmware_header *)info->fw->data;
1627 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1628 adev->firmware.fw_size +=
1629 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

			/* TODO: Determine if MEC2 JT FW loading can be removed
			 * for all GFX V9 asics and above */
			if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1634 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1635 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1636 info->fw = adev->gfx.mec2_fw;
1637 adev->firmware.fw_size +=
1638 ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1639 PAGE_SIZE);
1640 }
1641 }
1642 }
1643
1644out:
1645 gfx_v9_0_check_if_need_gfxoff(adev);
1646 gfx_v9_0_check_fw_write_wait(adev);
1647 if (err) {
1648 dev_err(adev->dev,
1649 "gfx9: Failed to load firmware \"%s\"\n",
1650 fw_name);
1651 release_firmware(adev->gfx.mec_fw);
1652 adev->gfx.mec_fw = NULL;
1653 release_firmware(adev->gfx.mec2_fw);
1654 adev->gfx.mec2_fw = NULL;
1655 }
1656 return err;
1657}
1658
1659static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1660{
1661 const char *chip_name;
1662 int r;
1663
1664 DRM_DEBUG("\n");
1665
1666 switch (adev->asic_type) {
1667 case CHIP_VEGA10:
1668 chip_name = "vega10";
1669 break;
1670 case CHIP_VEGA12:
1671 chip_name = "vega12";
1672 break;
1673 case CHIP_VEGA20:
1674 chip_name = "vega20";
1675 break;
1676 case CHIP_RAVEN:
1677 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1678 chip_name = "raven2";
1679 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1680 chip_name = "picasso";
1681 else
1682 chip_name = "raven";
1683 break;
1684 case CHIP_ARCTURUS:
1685 chip_name = "arcturus";
1686 break;
1687 case CHIP_RENOIR:
1688 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1689 chip_name = "renoir";
1690 else
1691 chip_name = "green_sardine";
1692 break;
1693 case CHIP_ALDEBARAN:
1694 chip_name = "aldebaran";
1695 break;
1696 default:
1697 BUG();
1698 }
1699
	/* compute-only ASICs (no gfx rings) have no CP gfx firmware to load */
	if (adev->gfx.num_gfx_rings) {
1702 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1703 if (r)
1704 return r;
1705 }
1706
1707 r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1708 if (r)
1709 return r;
1710
	return gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1716}
1717
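/*
 * Dword count of the clear-state indirect buffer: 2 for PREAMBLE_CNTL
 * (begin), 3 for CONTEXT_CONTROL, 2 + reg_count per SECT_CONTEXT extent,
 * 2 for PREAMBLE_CNTL (end) and 2 for CLEAR_STATE. This must stay in
 * sync with gfx_v9_0_get_csb_buffer() below, which emits those packets.
 */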
1718static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1719{
1720 u32 count = 0;
1721 const struct cs_section_def *sect = NULL;
1722 const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;
1728
1729 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1730 for (ext = sect->section; ext->extent != NULL; ++ext) {
1731 if (sect->id == SECT_CONTEXT)
1732 count += 2 + ext->reg_count;
1733 else
1734 return 0;
1735 }
1736 }

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;
1742
1743 return count;
1744}
1745
1746static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1747 volatile u32 *buffer)
1748{
1749 u32 count = 0, i;
1750 const struct cs_section_def *sect = NULL;
1751 const struct cs_extent_def *ext = NULL;
1752
1753 if (adev->gfx.rlc.cs_data == NULL)
1754 return;
1755 if (buffer == NULL)
1756 return;
1757
1758 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1759 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1760
1761 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1762 buffer[count++] = cpu_to_le32(0x80000000);
1763 buffer[count++] = cpu_to_le32(0x80000000);
1764
1765 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1766 for (ext = sect->section; ext->extent != NULL; ++ext) {
1767 if (sect->id == SECT_CONTEXT) {
1768 buffer[count++] =
1769 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1770 buffer[count++] = cpu_to_le32(ext->reg_index -
1771 PACKET3_SET_CONTEXT_REG_START);
1772 for (i = 0; i < ext->reg_count; i++)
1773 buffer[count++] = cpu_to_le32(ext->extent[i]);
1774 } else {
1775 return;
1776 }
1777 }
1778 }
1779
1780 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1781 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1782
1783 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1784 buffer[count++] = cpu_to_le32(0);
1785}
1786
1787static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1788{
1789 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1790 uint32_t pg_always_on_cu_num = 2;
1791 uint32_t always_on_cu_num;
1792 uint32_t i, j, k;
1793 uint32_t mask, cu_bitmap, counter;
1794
1795 if (adev->flags & AMD_IS_APU)
1796 always_on_cu_num = 4;
1797 else if (adev->asic_type == CHIP_VEGA12)
1798 always_on_cu_num = 8;
1799 else
1800 always_on_cu_num = 12;
1801
1802 mutex_lock(&adev->grbm_idx_mutex);
1803 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1804 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1805 mask = 1;
1806 cu_bitmap = 0;
1807 counter = 0;
1808 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1809
1810 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
1811 if (cu_info->bitmap[i][j] & mask) {
1812 if (counter == pg_always_on_cu_num)
1813 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1814 if (counter < always_on_cu_num)
1815 cu_bitmap |= mask;
1816 else
1817 break;
1818 counter++;
1819 }
1820 mask <<= 1;
1821 }
1822
1823 WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1824 cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1825 }
1826 }
1827 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1828 mutex_unlock(&adev->grbm_idx_mutex);
1829}
1830
1831static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1832{
1833 uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1846
1847 mutex_lock(&adev->grbm_idx_mutex);
1848
1849 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1850 WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK is programmed from
	 * gfx_v9_0_init_always_on_cu_mask() below.
	 */

	/* configure mmRLC_LB_CNTL; bit 31 (RESERVED) is used as part of
	 * the load-balance configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1875 mutex_unlock(&adev->grbm_idx_mutex);
1876
1877 gfx_v9_0_init_always_on_cu_mask(adev);
1878}
1879
1880static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1881{
1882 uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1895
1896 mutex_lock(&adev->grbm_idx_mutex);
1897
1898 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1899 WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK is programmed from
	 * gfx_v9_0_init_always_on_cu_mask() below.
	 */

	/* configure mmRLC_LB_CNTL; bit 31 (RESERVED) is used as part of
	 * the load-balance configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1924 mutex_unlock(&adev->grbm_idx_mutex);
1925
1926 gfx_v9_0_init_always_on_cu_mask(adev);
1927}
1928
1929static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1930{
1931 WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1932}
1933
1934static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1935{
1936 if (gfx_v9_0_load_mec2_fw_bin_support(adev))
1937 return 5;
1938 else
1939 return 4;
1940}
1941
1942static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1943{
1944 const struct cs_section_def *cs_data;
1945 int r;
1946
1947 adev->gfx.rlc.cs_data = gfx9_cs_data;
1948
1949 cs_data = adev->gfx.rlc.cs_data;
1950
	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	if (adev->flags & AMD_IS_APU) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_gfx_rlc_init_cpt(adev);
		if (r)
			return r;
	}
1965
1966 switch (adev->asic_type) {
1967 case CHIP_RAVEN:
1968 gfx_v9_0_init_lbpw(adev);
1969 break;
1970 case CHIP_VEGA20:
1971 gfx_v9_4_init_lbpw(adev);
1972 break;
1973 default:
1974 break;
1975 }

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1980
1981 return 0;
1982}
1983
1984static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1985{
1986 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1987 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1988}
1989
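/*
 * Allocate MEC resources: a VRAM object holding one GFX9_MEC_HPD_SIZE
 * EOP buffer slice per acquired compute ring, and a GTT object holding
 * a copy of the MEC firmware image for the CP to fetch.
 */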
1990static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1991{
1992 int r;
1993 u32 *hpd;
1994 const __le32 *fw_data;
1995 unsigned fw_size;
1996 u32 *fw;
1997 size_t mec_hpd_size;
1998
1999 const struct gfx_firmware_header_v1_0 *mec_hdr;
2000
2001 bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
2006 if (mec_hpd_size) {
2007 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
2008 AMDGPU_GEM_DOMAIN_VRAM,
2009 &adev->gfx.mec.hpd_eop_obj,
2010 &adev->gfx.mec.hpd_eop_gpu_addr,
2011 (void **)&hpd);
2012 if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
2014 gfx_v9_0_mec_fini(adev);
2015 return r;
2016 }
2017
2018 memset(hpd, 0, mec_hpd_size);
2019
2020 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2021 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2022 }
2023
2024 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2025
2026 fw_data = (const __le32 *)
2027 (adev->gfx.mec_fw->data +
2028 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2029 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
2030
2031 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
2032 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2033 &adev->gfx.mec.mec_fw_obj,
2034 &adev->gfx.mec.mec_fw_gpu_addr,
2035 (void **)&fw);
2036 if (r) {
2037 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
2038 gfx_v9_0_mec_fini(adev);
2039 return r;
2040 }
2041
2042 memcpy(fw, fw_data, fw_size);
2043
2044 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
2045 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
2046
2047 return 0;
2048}
2049
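/*
 * Wave-state access goes through the SQ indirect register pair: program
 * SQ_IND_INDEX with the SIMD/wave/register selection (FORCE_READ, and
 * AUTO_INCR for bulk GPR reads), then read values back from SQ_IND_DATA.
 */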
2050static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
2051{
2052 WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2053 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2054 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2055 (address << SQ_IND_INDEX__INDEX__SHIFT) |
2056 (SQ_IND_INDEX__FORCE_READ_MASK));
2057 return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2058}
2059
2060static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
2061 uint32_t wave, uint32_t thread,
2062 uint32_t regno, uint32_t num, uint32_t *out)
2063{
2064 WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2065 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2066 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2067 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
2068 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
2069 (SQ_IND_INDEX__FORCE_READ_MASK) |
2070 (SQ_IND_INDEX__AUTO_INCR_MASK));
2071 while (num--)
2072 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2073}
2074
2075static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
2076{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
2079 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
2080 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
2081 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
2082 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
2083 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
2084 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
2085 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
2086 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
2087 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
2088 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
2089 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
2090 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
2091 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
2092 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
2093 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
2094}
2095
2096static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
2097 uint32_t wave, uint32_t start,
2098 uint32_t size, uint32_t *dst)
2099{
2100 wave_read_regs(
2101 adev, simd, wave, 0,
2102 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2103}
2104
2105static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
2106 uint32_t wave, uint32_t thread,
2107 uint32_t start, uint32_t size,
2108 uint32_t *dst)
2109{
2110 wave_read_regs(
2111 adev, simd, wave, thread,
2112 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2113}
2114
2115static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2116 u32 me, u32 pipe, u32 q, u32 vm)
2117{
2118 soc15_grbm_select(adev, me, pipe, q, vm);
2119}
2120
2121static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2122 .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2123 .select_se_sh = &gfx_v9_0_select_se_sh,
2124 .read_wave_data = &gfx_v9_0_read_wave_data,
2125 .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2126 .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2127 .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2128};
2129
2130static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = {
2131 .ras_late_init = amdgpu_gfx_ras_late_init,
2132 .ras_fini = amdgpu_gfx_ras_fini,
2133 .ras_error_inject = &gfx_v9_0_ras_error_inject,
2134 .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2135 .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2136};
2137
2138static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2139{
2140 u32 gb_addr_config;
2141 int err;
2142
2143 adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
2144
2145 switch (adev->asic_type) {
2146 case CHIP_VEGA10:
2147 adev->gfx.config.max_hw_contexts = 8;
2148 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2149 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2150 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2151 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2152 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2153 break;
2154 case CHIP_VEGA12:
2155 adev->gfx.config.max_hw_contexts = 8;
2156 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2157 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2158 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2159 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2160 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2161 DRM_INFO("fix gfx.config for vega12\n");
2162 break;
2163 case CHIP_VEGA20:
2164 adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
2165 adev->gfx.config.max_hw_contexts = 8;
2166 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2167 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2168 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2169 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2170 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2171 gb_addr_config &= ~0xf3e777ff;
2172 gb_addr_config |= 0x22014042;
2173
2174 err = amdgpu_atomfirmware_get_gfx_info(adev);
2175 if (err)
2176 return err;
2177 break;
2178 case CHIP_RAVEN:
2179 adev->gfx.config.max_hw_contexts = 8;
2180 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2181 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2182 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2183 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2184 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2185 gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2186 else
2187 gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2188 break;
2189 case CHIP_ARCTURUS:
2190 adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
2191 adev->gfx.config.max_hw_contexts = 8;
2192 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2193 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2194 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2195 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2196 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2197 gb_addr_config &= ~0xf3e777ff;
2198 gb_addr_config |= 0x22014042;
2199 break;
2200 case CHIP_RENOIR:
2201 adev->gfx.config.max_hw_contexts = 8;
2202 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2203 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2204 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2205 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2206 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2207 gb_addr_config &= ~0xf3e777ff;
2208 gb_addr_config |= 0x22010042;
2209 break;
2210 case CHIP_ALDEBARAN:
2211 adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
2212 adev->gfx.config.max_hw_contexts = 8;
2213 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2214 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2215 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2216 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2217 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2218 gb_addr_config &= ~0xf3e777ff;
2219 gb_addr_config |= 0x22014042;
2220
2221 err = amdgpu_atomfirmware_get_gfx_info(adev);
2222 if (err)
2223 return err;
2224 break;
2225 default:
2226 BUG();
2227 break;
2228 }
2229
2230 adev->gfx.config.gb_addr_config = gb_addr_config;
2231
2232 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2233 REG_GET_FIELD(
2234 adev->gfx.config.gb_addr_config,
2235 GB_ADDR_CONFIG,
2236 NUM_PIPES);
2237
2238 adev->gfx.config.max_tile_pipes =
2239 adev->gfx.config.gb_addr_config_fields.num_pipes;
2240
2241 adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2242 REG_GET_FIELD(
2243 adev->gfx.config.gb_addr_config,
2244 GB_ADDR_CONFIG,
2245 NUM_BANKS);
2246 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2247 REG_GET_FIELD(
2248 adev->gfx.config.gb_addr_config,
2249 GB_ADDR_CONFIG,
2250 MAX_COMPRESSED_FRAGS);
2251 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2252 REG_GET_FIELD(
2253 adev->gfx.config.gb_addr_config,
2254 GB_ADDR_CONFIG,
2255 NUM_RB_PER_SE);
2256 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2257 REG_GET_FIELD(
2258 adev->gfx.config.gb_addr_config,
2259 GB_ADDR_CONFIG,
2260 NUM_SHADER_ENGINES);
2261 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2262 REG_GET_FIELD(
2263 adev->gfx.config.gb_addr_config,
2264 GB_ADDR_CONFIG,
2265 PIPE_INTERLEAVE_SIZE));
2266
2267 return 0;
2268}
2269
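/*
 * Map one compute ring onto an (mec, pipe, queue) slot: point it at its
 * slice of the HPD EOP buffer, assign its doorbell (the << 1 converts
 * the 64-bit doorbell slot to a dword index), and route it to the EOP
 * interrupt of its pipe.
 */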
2270static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2271 int mec, int pipe, int queue)
2272{
2273 unsigned irq_type;
2274 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2275 unsigned int hw_prio;

	/* mec0 is me1 */
	ring->me = mec + 1;
2281 ring->pipe = pipe;
2282 ring->queue = queue;
2283
2284 ring->ring_obj = NULL;
2285 ring->use_doorbell = true;
2286 ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2287 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2288 + (ring_id * GFX9_MEC_HPD_SIZE);
2289 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2290
2291 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2292 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2293 + ring->pipe;
2294 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
2295 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
2296
2297 return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
2298 hw_prio, NULL);
2299}
2300
2301static int gfx_v9_0_sw_init(void *handle)
2302{
2303 int i, j, k, r, ring_id;
2304 struct amdgpu_ring *ring;
2305 struct amdgpu_kiq *kiq;
2306 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2307
2308 switch (adev->asic_type) {
2309 case CHIP_VEGA10:
2310 case CHIP_VEGA12:
2311 case CHIP_VEGA20:
2312 case CHIP_RAVEN:
2313 case CHIP_ARCTURUS:
2314 case CHIP_RENOIR:
2315 case CHIP_ALDEBARAN:
2316 adev->gfx.mec.num_mec = 2;
2317 break;
2318 default:
2319 adev->gfx.mec.num_mec = 1;
2320 break;
2321 }
2322
2323 adev->gfx.mec.num_pipe_per_mec = 4;
2324 adev->gfx.mec.num_queue_per_pipe = 8;
2325

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2328 if (r)
2329 return r;
2330

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2333 &adev->gfx.priv_reg_irq);
2334 if (r)
2335 return r;
2336

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2339 &adev->gfx.priv_inst_irq);
2340 if (r)
2341 return r;
2342

	/* ECC error */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2345 &adev->gfx.cp_ecc_error_irq);
2346 if (r)
2347 return r;
2348

	/* FUE error */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2351 &adev->gfx.cp_ecc_error_irq);
2352 if (r)
2353 return r;
2354
2355 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2356
2357 gfx_v9_0_scratch_init(adev);
2358
2359 r = gfx_v9_0_init_microcode(adev);
2360 if (r) {
2361 DRM_ERROR("Failed to load gfx firmware!\n");
2362 return r;
2363 }
2364
2365 r = adev->gfx.rlc.funcs->init(adev);
2366 if (r) {
2367 DRM_ERROR("Failed to init rlc BOs!\n");
2368 return r;
2369 }
2370
2371 r = gfx_v9_0_mec_init(adev);
2372 if (r) {
2373 DRM_ERROR("Failed to init MEC BOs!\n");
2374 return r;
2375 }
2376

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2379 ring = &adev->gfx.gfx_ring[i];
2380 ring->ring_obj = NULL;
2381 if (!i)
2382 sprintf(ring->name, "gfx");
2383 else
2384 sprintf(ring->name, "gfx_%d", i);
2385 ring->use_doorbell = true;
2386 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2387 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2388 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2389 AMDGPU_RING_PRIO_DEFAULT, NULL);
2390 if (r)
2391 return r;
2392 }
2393

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
2396 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2397 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2398 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2399 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2400 continue;
2401
2402 r = gfx_v9_0_compute_ring_init(adev,
2403 ring_id,
2404 i, k, j);
2405 if (r)
2406 return r;
2407
2408 ring_id++;
2409 }
2410 }
2411 }
2412
2413 r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2414 if (r) {
2415 DRM_ERROR("Failed to init KIQ BOs!\n");
2416 return r;
2417 }
2418
2419 kiq = &adev->gfx.kiq;
2420 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2421 if (r)
2422 return r;
2423

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2426 if (r)
2427 return r;
2428
2429 adev->gfx.ce_ram_size = 0x8000;
2430
2431 r = gfx_v9_0_gpu_early_init(adev);
2432 if (r)
2433 return r;
2434
2435 return 0;
2436}
2437
2438
2439static int gfx_v9_0_sw_fini(void *handle)
2440{
2441 int i;
2442 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2443
2444 if (adev->gfx.ras_funcs &&
2445 adev->gfx.ras_funcs->ras_fini)
2446 adev->gfx.ras_funcs->ras_fini(adev);
2447
2448 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2449 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2450 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2451 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2452
2453 amdgpu_gfx_mqd_sw_fini(adev);
2454 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2455 amdgpu_gfx_kiq_fini(adev);
2456
2457 gfx_v9_0_mec_fini(adev);
2458 amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2459 if (adev->flags & AMD_IS_APU) {
2460 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2461 &adev->gfx.rlc.cp_table_gpu_addr,
2462 (void **)&adev->gfx.rlc.cp_table_ptr);
2463 }
2464 gfx_v9_0_free_microcode(adev);
2465
2466 return 0;
2467}
2468
2469
static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}
2474
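/*
 * Steer subsequent register accesses to one shader engine / shader array
 * / instance via GRBM_GFX_INDEX; 0xffffffff selects broadcast mode for
 * that field. Callers serialize on grbm_idx_mutex and restore broadcast
 * mode when done.
 */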
2475void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2476 u32 instance)
2477{
2478 u32 data;
2479
2480 if (instance == 0xffffffff)
2481 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2482 else
2483 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2484
2485 if (se_num == 0xffffffff)
2486 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2487 else
2488 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2489
2490 if (sh_num == 0xffffffff)
2491 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2492 else
2493 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2494
2495 WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2496}
2497
2498static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2499{
2500 u32 data, mask;
2501
2502 data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2503 data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2504
2505 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2506 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2507
2508 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2509 adev->gfx.config.max_sh_per_se);
2510
2511 return (~data) & mask;
2512}
2513
2514static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2515{
2516 int i, j;
2517 u32 data;
2518 u32 active_rbs = 0;
2519 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2520 adev->gfx.config.max_sh_per_se;
2521
2522 mutex_lock(&adev->grbm_idx_mutex);
2523 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2524 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2525 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2526 data = gfx_v9_0_get_rb_active_bitmap(adev);
2527 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2528 rb_bitmap_width_per_sh);
2529 }
2530 }
2531 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2532 mutex_unlock(&adev->grbm_idx_mutex);
2533
2534 adev->gfx.config.backend_enable_mask = active_rbs;
2535 adev->gfx.config.num_rbs = hweight32(active_rbs);
2536}
2537
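/*
 * Compute (KFD) VMIDs get a fixed non-zero SH_MEM aperture base; the
 * register holds two 16-bit base fields, so DEFAULT_SH_MEM_BASES is
 * replicated into both halves below.
 */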
2538#define DEFAULT_SH_MEM_BASES (0x6000)
2539static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2540{
2541 int i;
2542 uint32_t sh_mem_config;
2543 uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2552
2553 sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2554 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2555 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2556
2557 mutex_lock(&adev->srbm_mutex);
2558 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2559 soc15_grbm_select(adev, 0, 0, 0, i);
2560
2561 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2562 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2563 }
2564 soc15_grbm_select(adev, 0, 0, 0, 0);
2565 mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2570 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2571 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2572 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2573 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2574 }
2575}
2576
2577static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2578{
2579 int vmid;

	/* Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2588 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2589 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2590 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2591 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2592 }
2593}
2594
2595static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2596{
2597 uint32_t tmp;
2598
2599 switch (adev->asic_type) {
2600 case CHIP_ARCTURUS:
2601 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2602 tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2603 DISABLE_BARRIER_WAITCNT, 1);
2604 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2605 break;
2606 default:
2607 break;
2608 }
2609}
2610
2611static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2612{
2613 u32 tmp;
2614 int i;
2615
2616 WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2617
2618 gfx_v9_0_tiling_mode_table_init(adev);
2619
2620 gfx_v9_0_setup_rb(adev);
2621 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2622 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		if (i == 0) {
2631 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2632 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2633 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2634 !!adev->gmc.noretry);
2635 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2636 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2637 } else {
2638 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2639 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2640 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2641 !!adev->gmc.noretry);
2642 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2643 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2644 (adev->gmc.private_aperture_start >> 48));
2645 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2646 (adev->gmc.shared_aperture_start >> 48));
2647 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2648 }
2649 }
2650 soc15_grbm_select(adev, 0, 0, 0, 0);
2651
2652 mutex_unlock(&adev->srbm_mutex);
2653
2654 gfx_v9_0_init_compute_vmid(adev);
2655 gfx_v9_0_init_gds_vmid(adev);
2656 gfx_v9_0_init_sq_config(adev);
2657}
2658
2659static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2660{
2661 u32 i, j, k;
2662 u32 mask;
2663
2664 mutex_lock(&adev->grbm_idx_mutex);
2665 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2666 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2667 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2668 for (k = 0; k < adev->usec_timeout; k++) {
2669 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2670 break;
2671 udelay(1);
2672 }
2673 if (k == adev->usec_timeout) {
2674 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2675 0xffffffff, 0xffffffff);
2676 mutex_unlock(&adev->grbm_idx_mutex);
2677 DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
2678 i, j);
2679 return;
2680 }
2681 }
2682 }
2683 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2684 mutex_unlock(&adev->grbm_idx_mutex);
2685
2686 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2687 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2688 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2689 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2690 for (k = 0; k < adev->usec_timeout; k++) {
2691 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2692 break;
2693 udelay(1);
2694 }
2695}
2696
2697static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2698 bool enable)
2699{
2700 u32 tmp;
2701

	/* These interrupts should be enabled to drive DS clock */
	tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2705
2706 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2707 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2708 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2709 if(adev->gfx.num_gfx_rings)
2710 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2711
2712 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2713}
2714
2715static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2716{
2717 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2718
2719 WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2720 adev->gfx.rlc.clear_state_gpu_addr >> 32);
2721 WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2722 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2723 WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2724 adev->gfx.rlc.clear_state_size);
2725}
2726
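/*
 * Scan the RLC register-list-format blob starting at indirect_offset:
 * record where each 0xFFFFFFFF-terminated indirect block begins and
 * collect the set of unique indirect register offsets referenced there.
 */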
2727static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2728 int indirect_offset,
2729 int list_size,
2730 int *unique_indirect_regs,
2731 int unique_indirect_reg_count,
2732 int *indirect_start_offsets,
2733 int *indirect_start_offsets_count,
2734 int max_start_offsets_count)
2735{
2736 int idx;
2737
2738 for (; indirect_offset < list_size; indirect_offset++) {
2739 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2740 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2741 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2742
2743 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2744 indirect_offset += 2;

			/* look for the matching index */
			for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2748 if (unique_indirect_regs[idx] ==
2749 register_list_format[indirect_offset] ||
2750 !unique_indirect_regs[idx])
2751 break;
2752 }
2753
2754 BUG_ON(idx >= unique_indirect_reg_count);
2755
2756 if (!unique_indirect_regs[idx])
2757 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2758
2759 indirect_offset++;
2760 }
2761 }
2762}
2763
2764static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2765{
2766 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2767 int unique_indirect_reg_count = 0;
2768
2769 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2770 int indirect_start_offsets_count = 0;
2771
2772 int list_size = 0;
2773 int i = 0, j = 0;
2774 u32 tmp = 0;
2775
2776 u32 *register_list_format =
2777 kmemdup(adev->gfx.rlc.register_list_format,
2778 adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2779 if (!register_list_format)
2780 return -ENOMEM;

	/* setup unique_indirect_regs array and indirect_start_offsets array */
	unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2784 gfx_v9_1_parse_ind_reg_list(register_list_format,
2785 adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2786 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2787 unique_indirect_regs,
2788 unique_indirect_reg_count,
2789 indirect_start_offsets,
2790 &indirect_start_offsets_count,
2791 ARRAY_SIZE(indirect_start_offsets));

	/* enable auto inc in case it is disabled */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);

	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
		RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2801 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2802 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2803 adev->gfx.rlc.register_restore[i]);

	/* load indirect register */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
		adev->gfx.rlc.reg_list_format_start);

	/* direct register portion */
	for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2811 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2812 register_list_format[i]);

	/* indirect register portion */
	while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2816 if (register_list_format[i] == 0xFFFFFFFF) {
2817 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2818 continue;
2819 }
2820
2821 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2822 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2823
2824 for (j = 0; j < unique_indirect_reg_count; j++) {
2825 if (register_list_format[i] == unique_indirect_regs[j]) {
2826 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2827 break;
2828 }
2829 }
2830
2831 BUG_ON(j >= unique_indirect_reg_count);
2832
2833 i++;
2834 }

	/* set save/restore list size */
	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
	list_size = list_size >> 1;
2839 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2840 adev->gfx.rlc.reg_restore_list_size);
2841 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);

	/* write the starting offsets to RLC scratch ram */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
		adev->gfx.rlc.starting_offsets_start);
2846 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2847 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2848 indirect_start_offsets[i]);

	/* load unique indirect regs */
	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2852 if (unique_indirect_regs[i] != 0) {
2853 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2854 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2855 unique_indirect_regs[i] & 0x3FFFF);
2856
2857 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2858 + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2859 unique_indirect_regs[i] >> 20);
2860 }
2861 }
2862
2863 kfree(register_list_format);
2864 return 0;
2865}
2866
2867static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2868{
2869 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2870}
2871
2872static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2873 bool enable)
2874{
2875 uint32_t data = 0;
2876 uint32_t default_data = 0;
2877
2878 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
	if (enable) {
		/* enable GFXIP control over CGPG */
		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);

		/* update status */
		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
	} else {
		/* restore GFXIP control over CGPG */
		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
	}
2896}
2897
2898static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2899{
2900 uint32_t data = 0;
2901
2902 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2903 AMD_PG_SUPPORT_GFX_SMG |
2904 AMD_PG_SUPPORT_GFX_DMG)) {
		/* init IDLE_POLL_COUNT = 60 */
		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2910
		/* init RLC PG Delay */
		data = 0;
2913 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2914 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2915 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2916 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2917 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2918
2919 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2920 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2921 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2922 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2923
2924 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2925 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2926 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2927 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2928
2929 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2930 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2934 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2935 if (adev->asic_type != CHIP_RENOIR)
2936 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2937 }
2938}
2939
2940static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2941 bool enable)
2942{
2943 uint32_t data = 0;
2944 uint32_t default_data = 0;
2945
2946 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2947 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2948 SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2949 enable ? 1 : 0);
2950 if (default_data != data)
2951 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2952}
2953
2954static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2955 bool enable)
2956{
2957 uint32_t data = 0;
2958 uint32_t default_data = 0;
2959
2960 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2961 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2962 SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2963 enable ? 1 : 0);
2964 if(default_data != data)
2965 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2966}
2967
2968static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2969 bool enable)
2970{
2971 uint32_t data = 0;
2972 uint32_t default_data = 0;
2973
2974 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2975 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2976 CP_PG_DISABLE,
2977 enable ? 0 : 1);
2978 if(default_data != data)
2979 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2980}
2981
2982static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2983 bool enable)
2984{
2985 uint32_t data, default_data;
2986
2987 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2988 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2989 GFX_POWER_GATING_ENABLE,
2990 enable ? 1 : 0);
2991 if(default_data != data)
2992 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2993}
2994
2995static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2996 bool enable)
2997{
2998 uint32_t data, default_data;
2999
3000 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3001 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3002 GFX_PIPELINE_PG_ENABLE,
3003 enable ? 1 : 0);
3004 if(default_data != data)
3005 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3006
	if (!enable)
		/* read any GFX register to wake up GFX */
		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
3010}
3011
3012static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
3013 bool enable)
3014{
3015 uint32_t data, default_data;
3016
3017 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3018 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3019 STATIC_PER_CU_PG_ENABLE,
3020 enable ? 1 : 0);
3021 if(default_data != data)
3022 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3023}
3024
3025static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
3026 bool enable)
3027{
3028 uint32_t data, default_data;
3029
3030 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3031 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3032 DYN_PER_CU_PG_ENABLE,
3033 enable ? 1 : 0);
3034 if(default_data != data)
3035 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3036}
3037
3038static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
3039{
	gfx_v9_0_init_csb(adev);

	/*
	 * The RLC save/restore list has been usable since RLC firmware
	 * v2.1, and it is needed by the gfxoff feature.
	 */
	if (adev->gfx.rlc.is_rlc_v2_1) {
3047 if (adev->asic_type == CHIP_VEGA12 ||
3048 (adev->apu_flags & AMD_APU_IS_RAVEN2))
3049 gfx_v9_1_init_rlc_save_restore_list(adev);
3050 gfx_v9_0_enable_save_restore_machine(adev);
3051 }
3052
3053 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3054 AMD_PG_SUPPORT_GFX_SMG |
3055 AMD_PG_SUPPORT_GFX_DMG |
3056 AMD_PG_SUPPORT_CP |
3057 AMD_PG_SUPPORT_GDS |
3058 AMD_PG_SUPPORT_RLC_SMU_HS)) {
3059 WREG32(mmRLC_JUMP_TABLE_RESTORE,
3060 adev->gfx.rlc.cp_table_gpu_addr >> 8);
3061 gfx_v9_0_init_gfx_power_gating(adev);
3062 }
3063}
3064
3065static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3066{
3067 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3068 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3069 gfx_v9_0_wait_for_rlc_serdes(adev);
3070}
3071
3072static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3073{
3074 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3075 udelay(50);
3076 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3077 udelay(50);
3078}
3079
3080static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3081{
3082#ifdef AMDGPU_RLC_DEBUG_RETRY
3083 u32 rlc_ucode_ver;
3084#endif
3085
3086 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3087 udelay(50);

	/* on APUs the GUI idle interrupt is enabled after the CP is initialized */
	if (!(adev->flags & AMD_IS_APU)) {
3091 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3092 udelay(50);
3093 }
3094
#ifdef AMDGPU_RLC_DEBUG_RETRY
	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
	if (rlc_ucode_ver == 0x108) {
		DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
				rlc_ucode_ver, adev->gfx.rlc_fw_version);

		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
		 * default is 0x9C4 to create a 100us interval */
		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);

		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
		 * to disable the page fault retry interrupts, default is
		 * 0x100 (256) */
		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
	}
3109#endif
3110}
3111
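/*
 * Legacy (non-PSP) RLC load: point RLC_GPM_UCODE_ADDR at the fixed load
 * address, stream the ucode dwords through RLC_GPM_UCODE_DATA, then
 * write the firmware version back to RLC_GPM_UCODE_ADDR.
 */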
3112static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3113{
3114 const struct rlc_firmware_header_v2_0 *hdr;
3115 const __le32 *fw_data;
3116 unsigned i, fw_size;
3117
3118 if (!adev->gfx.rlc_fw)
3119 return -EINVAL;
3120
3121 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3122 amdgpu_ucode_print_rlc_hdr(&hdr->header);
3123
3124 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3125 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3126 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3127
3128 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3129 RLCG_UCODE_LOADING_START_ADDRESS);
3130 for (i = 0; i < fw_size; i++)
3131 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3132 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3133
3134 return 0;
3135}
3136
3137static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3138{
3139 int r;
3140
3141 if (amdgpu_sriov_vf(adev)) {
3142 gfx_v9_0_init_csb(adev);
3143 return 0;
3144 }
3145
3146 adev->gfx.rlc.funcs->stop(adev);

	/* disable CG */
	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3150
3151 gfx_v9_0_init_pg(adev);
3152
3153 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		/* legacy rlc firmware loading */
3155 r = gfx_v9_0_rlc_load_microcode(adev);
3156 if (r)
3157 return r;
3158 }
3159
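	/* Load Balancing Per Watt (LBPW): honor the amdgpu_lbpw parameter;
	 * Raven enables it unless explicitly disabled, Vega20 only enables
	 * it when explicitly requested
	 */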
3160 switch (adev->asic_type) {
3161 case CHIP_RAVEN:
3162 if (amdgpu_lbpw == 0)
3163 gfx_v9_0_enable_lbpw(adev, false);
3164 else
3165 gfx_v9_0_enable_lbpw(adev, true);
3166 break;
3167 case CHIP_VEGA20:
3168 if (amdgpu_lbpw > 0)
3169 gfx_v9_0_enable_lbpw(adev, true);
3170 else
3171 gfx_v9_0_enable_lbpw(adev, false);
3172 break;
3173 default:
3174 break;
3175 }
3176
3177 adev->gfx.rlc.funcs->start(adev);
3178
3179 return 0;
3180}
3181
3182static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3183{
3184 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3185
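	/* enable==true clears the ME/PFP/CE HALT bits, false sets them to stop the engines */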
3186 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3187 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3188 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3189 WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3190 udelay(50);
3191}
3192
3193static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3194{
3195 const struct gfx_firmware_header_v1_0 *pfp_hdr;
3196 const struct gfx_firmware_header_v1_0 *ce_hdr;
3197 const struct gfx_firmware_header_v1_0 *me_hdr;
3198 const __le32 *fw_data;
3199 unsigned i, fw_size;
3200
3201 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3202 return -EINVAL;
3203
3204 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3205 adev->gfx.pfp_fw->data;
3206 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3207 adev->gfx.ce_fw->data;
3208 me_hdr = (const struct gfx_firmware_header_v1_0 *)
3209 adev->gfx.me_fw->data;
3210
3211 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3212 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3213 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3214
3215 gfx_v9_0_cp_gfx_enable(adev, false);

	/* PFP */
3218 fw_data = (const __le32 *)
3219 (adev->gfx.pfp_fw->data +
3220 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3221 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3222 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3223 for (i = 0; i < fw_size; i++)
3224 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3225 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	/* CE */
3228 fw_data = (const __le32 *)
3229 (adev->gfx.ce_fw->data +
3230 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3231 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3232 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3233 for (i = 0; i < fw_size; i++)
3234 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3235 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

	/* ME */
3238 fw_data = (const __le32 *)
3239 (adev->gfx.me_fw->data +
3240 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3241 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3242 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3243 for (i = 0; i < fw_size; i++)
3244 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3245 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3246
3247 return 0;
3248}
3249
3250static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3251{
3252 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3253 const struct cs_section_def *sect = NULL;
3254 const struct cs_extent_def *ext = NULL;
3255 int r, i, tmp;

	/* init the CP */
3258 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3259 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3260
3261 gfx_v9_0_cp_gfx_enable(adev, true);
3262
3263 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3264 if (r) {
3265 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3266 return r;
3267 }
3268
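	/* mark the start of the clear-state preamble */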
3269 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3270 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3271
3272 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3273 amdgpu_ring_write(ring, 0x80000000);
3274 amdgpu_ring_write(ring, 0x80000000);
3275
3276 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3277 for (ext = sect->section; ext->extent != NULL; ++ext) {
3278 if (sect->id == SECT_CONTEXT) {
3279 amdgpu_ring_write(ring,
3280 PACKET3(PACKET3_SET_CONTEXT_REG,
3281 ext->reg_count));
3282 amdgpu_ring_write(ring,
3283 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3284 for (i = 0; i < ext->reg_count; i++)
3285 amdgpu_ring_write(ring, ext->extent[i]);
3286 }
3287 }
3288 }
3289
3290 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3291 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3292
3293 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3294 amdgpu_ring_write(ring, 0);
3295
3296 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3297 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3298 amdgpu_ring_write(ring, 0x8000);
3299 amdgpu_ring_write(ring, 0x8000);
3300
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3302 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3303 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3304 amdgpu_ring_write(ring, tmp);
3305 amdgpu_ring_write(ring, 0);
3306
3307 amdgpu_ring_commit(ring);
3308
3309 return 0;
3310}
3311
3312static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3313{
3314 struct amdgpu_ring *ring;
3315 u32 tmp;
3316 u32 rb_bufsz;
3317 u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
3320 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
3323 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);

	/* Set ring buffer size */
3326 ring = &adev->gfx.gfx_ring[0];
3327 rb_bufsz = order_base_2(ring->ring_size / 8);
3328 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3329 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3330#ifdef __BIG_ENDIAN
3331 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3332#endif
3333 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
3336 ring->wptr = 0;
3337 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3338 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
3341 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3342 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3343 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3344
3345 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3346 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3347 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3348
3349 mdelay(1);
3350 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3351
3352 rb_addr = ring->gpu_addr >> 8;
3353 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3354 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3355
3356 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3357 if (ring->use_doorbell) {
3358 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3359 DOORBELL_OFFSET, ring->doorbell_index);
3360 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3361 DOORBELL_EN, 1);
3362 } else {
3363 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3364 }
3365 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3366
3367 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3368 DOORBELL_RANGE_LOWER, ring->doorbell_index);
3369 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3370
3371 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3372 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);

	/* start the ring */
3376 gfx_v9_0_cp_gfx_start(adev);
3377 ring->sched.ready = true;
3378
3379 return 0;
3380}
3381
3382static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3383{
3384 if (enable) {
3385 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3386 } else {
3387 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3388 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3389 adev->gfx.kiq.ring.sched.ready = false;
3390 }
3391 udelay(50);
3392}
3393
3394static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3395{
3396 const struct gfx_firmware_header_v1_0 *mec_hdr;
3397 const __le32 *fw_data;
3398 unsigned i;
3399 u32 tmp;
3400
3401 if (!adev->gfx.mec_fw)
3402 return -EINVAL;
3403
3404 gfx_v9_0_cp_compute_enable(adev, false);
3405
3406 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3407 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3408
3409 fw_data = (const __le32 *)
3410 (adev->gfx.mec_fw->data +
3411 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
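	/* point the CPC instruction cache at the MEC firmware (VMID 0) */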
3412 tmp = 0;
3413 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3414 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3415 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3416
3417 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3418 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3419 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3420 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	/* MEC1 jump table */
3423 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3424 mec_hdr->jt_offset);
3425 for (i = 0; i < mec_hdr->jt_size; i++)
3426 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3427 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3428
3429 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3430 adev->gfx.mec_fw_version);
3431
3432
3433 return 0;
3434}
3435
3436
3437static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3438{
3439 uint32_t tmp;
3440 struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
3443 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3444 tmp &= 0xffffff00;
3445 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3446 WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
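	/* the second write with bit 7 set seems to latch/enable the KIQ assignment */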
3447 tmp |= 0x80;
3448 WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3449}
3450
3451static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3452{
3453 struct amdgpu_device *adev = ring->adev;
3454
3455 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3456 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
3457 mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3458 mqd->cp_hqd_queue_priority =
3459 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3460 }
3461 }
3462}
3463
3464static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3465{
3466 struct amdgpu_device *adev = ring->adev;
3467 struct v9_mqd *mqd = ring->mqd_ptr;
3468 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3469 uint32_t tmp;
3470
3471 mqd->header = 0xC0310800;
3472 mqd->compute_pipelinestat_enable = 0x00000001;
3473 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3474 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3475 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3476 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3477 mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3478 mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3479 mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3480 mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3481 mqd->compute_misc_reserved = 0x00000003;
3482
3483 mqd->dynamic_cu_mask_addr_lo =
3484 lower_32_bits(ring->mqd_gpu_addr
3485 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3486 mqd->dynamic_cu_mask_addr_hi =
3487 upper_32_bits(ring->mqd_gpu_addr
3488 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3489
3490 eop_base_addr = ring->eop_gpu_addr >> 8;
3491 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3492 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3495 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3496 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3497 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3498
3499 mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
3502 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3503
3504 if (ring->use_doorbell) {
3505 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3506 DOORBELL_OFFSET, ring->doorbell_index);
3507 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3508 DOORBELL_EN, 1);
3509 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3510 DOORBELL_SOURCE, 0);
3511 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3512 DOORBELL_HIT, 0);
3513 } else {
3514 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3515 DOORBELL_EN, 0);
3516 }
3517
3518 mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
3521 ring->wptr = 0;
3522 mqd->cp_hqd_dequeue_request = 0;
3523 mqd->cp_hqd_pq_rptr = 0;
3524 mqd->cp_hqd_pq_wptr_lo = 0;
3525 mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
3528 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3529 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
3532 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3533 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3534 mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar CP_RB0_BASE */
3537 hqd_gpu_addr = ring->gpu_addr >> 8;
3538 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3539 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
3542 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3543 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3544 (order_base_2(ring->ring_size / 4) - 1));
3545 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3546 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3547#ifdef __BIG_ENDIAN
3548 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3549#endif
3550 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3551 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3552 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3553 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3554 mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
3557 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3558 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3559 mqd->cp_hqd_pq_rptr_report_addr_hi =
3560 upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3563 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3564 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3565 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3566
3567 tmp = 0;
	/* enable the doorbell if requested */
3569 if (ring->use_doorbell) {
3570 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3571 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3572 DOORBELL_OFFSET, ring->doorbell_index);
3573
3574 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3575 DOORBELL_EN, 1);
3576 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3577 DOORBELL_SOURCE, 0);
3578 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3579 DOORBELL_HIT, 0);
3580 }
3581
3582 mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3585 ring->wptr = 0;
3586 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
3589 mqd->cp_hqd_vmid = 0;
3590
3591 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3592 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3593 mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
3596 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3597 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3598 mqd->cp_hqd_ib_control = tmp;

	/* set static priority for a queue/ring */
3601 gfx_v9_0_mqd_set_priority(ring, mqd);
3602 mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);

	/* the map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs to set this field
	 */
3607 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3608 mqd->cp_hqd_active = 1;
3609
3610 return 0;
3611}
3612
3613static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3614{
3615 struct amdgpu_device *adev = ring->adev;
3616 struct v9_mqd *mqd = ring->mqd_ptr;
3617 int j;

	/* disable wptr polling */
3620 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3621
3622 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3623 mqd->cp_hqd_eop_base_addr_lo);
3624 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3625 mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3628 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3629 mqd->cp_hqd_eop_control);

	/* enable doorbell? */
3632 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3633 mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
3636 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3637 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3638 for (j = 0; j < adev->usec_timeout; j++) {
3639 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3640 break;
3641 udelay(1);
3642 }
3643 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3644 mqd->cp_hqd_dequeue_request);
3645 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3646 mqd->cp_hqd_pq_rptr);
3647 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3648 mqd->cp_hqd_pq_wptr_lo);
3649 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3650 mqd->cp_hqd_pq_wptr_hi);
3651 }

	/* set the pointer to the MQD */
3654 WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3655 mqd->cp_mqd_base_addr_lo);
3656 WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3657 mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
3660 WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3661 mqd->cp_mqd_control);

	/* set the pointer to the HQD, this is similar CP_RB0_BASE */
3664 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3665 mqd->cp_hqd_pq_base_lo);
3666 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3667 mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
3670 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3671 mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
3674 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3675 mqd->cp_hqd_pq_rptr_report_addr_lo);
3676 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3677 mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3680 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3681 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3682 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3683 mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* enable the doorbell if requested */
3686 if (ring->use_doorbell) {
3687 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3688 (adev->doorbell_index.kiq * 2) << 2);

		/* If GC has entered CGPG, ringing a doorbell beyond the first
		 * page doesn't wake GC up. Enlarge CP_MEC_DOORBELL_RANGE_UPPER
		 * to work around this issue; the change has to stay aligned
		 * with the firmware update.
		 */
3694 if (check_if_enlarge_doorbell_range(adev))
3695 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3696 (adev->doorbell.size - 4));
3697 else
3698 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3699 (adev->doorbell_index.userqueue_end * 2) << 2);
3700 }
3701
3702 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3703 mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3706 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3707 mqd->cp_hqd_pq_wptr_lo);
3708 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3709 mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
3712 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3713
3714 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3715 mqd->cp_hqd_persistent_state);

	/* activate the queue */
3718 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3719 mqd->cp_hqd_active);
3720
3721 if (ring->use_doorbell)
3722 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3723
3724 return 0;
3725}
3726
3727static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3728{
3729 struct amdgpu_device *adev = ring->adev;
3730 int j;

	/* disable the queue if it's active */
3733 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3734
3735 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3736
3737 for (j = 0; j < adev->usec_timeout; j++) {
3738 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3739 break;
3740 udelay(1);
3741 }
3742
		if (j == adev->usec_timeout) {
3744 DRM_DEBUG("KIQ dequeue request failed.\n");

			/* manually disable the queue if the dequeue request timed out */
3747 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3748 }
3749
3750 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3751 0);
3752 }
3753
3754 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3755 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3756 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3757 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3758 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3759 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3760 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3761 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3762
3763 return 0;
3764}
3765
3766static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3767{
3768 struct amdgpu_device *adev = ring->adev;
3769 struct v9_mqd *mqd = ring->mqd_ptr;
3770 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3771 struct v9_mqd *tmp_mqd;
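	/* the KIQ MQD backup lives at index AMDGPU_MAX_COMPUTE_RINGS, past the per-ring slots */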
3772
3773 gfx_v9_0_kiq_setting(ring);

	/* The GPU could be in a bad state during probe: the driver may trigger
	 * a reset after loading the SMU, in which case the MQD was never
	 * initialized. Check mqd->cp_hqd_pq_control, which should not be 0
	 * for an initialized MQD.
	 */
3780 tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
		/* for the GPU_RESET case, reset the MQD to a clean status */
3783 if (adev->gfx.mec.mqd_backup[mqd_idx])
3784 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));

		/* reset ring buffer */
3787 ring->wptr = 0;
3788 amdgpu_ring_clear_ring(ring);
3789
3790 mutex_lock(&adev->srbm_mutex);
3791 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3792 gfx_v9_0_kiq_init_register(ring);
3793 soc15_grbm_select(adev, 0, 0, 0, 0);
3794 mutex_unlock(&adev->srbm_mutex);
3795 } else {
3796 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3797 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3798 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3799 mutex_lock(&adev->srbm_mutex);
3800 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3801 gfx_v9_0_mqd_init(ring);
3802 gfx_v9_0_kiq_init_register(ring);
3803 soc15_grbm_select(adev, 0, 0, 0, 0);
3804 mutex_unlock(&adev->srbm_mutex);
3805
3806 if (adev->gfx.mec.mqd_backup[mqd_idx])
3807 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3808 }
3809
3810 return 0;
3811}
3812
3813static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3814{
3815 struct amdgpu_device *adev = ring->adev;
3816 struct v9_mqd *mqd = ring->mqd_ptr;
3817 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3818 struct v9_mqd *tmp_mqd;
3819
	/* Same as the KIQ init above: re-init the MQD if
	 * mqd->cp_hqd_pq_control shows it was never initialized.
	 */
3823 tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3824
3825 if (!tmp_mqd->cp_hqd_pq_control ||
3826 (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
3827 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3828 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3829 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3830 mutex_lock(&adev->srbm_mutex);
3831 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3832 gfx_v9_0_mqd_init(ring);
3833 soc15_grbm_select(adev, 0, 0, 0, 0);
3834 mutex_unlock(&adev->srbm_mutex);
3835
3836 if (adev->gfx.mec.mqd_backup[mqd_idx])
3837 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3838 } else if (amdgpu_in_reset(adev)) {
		/* reset MQD to a clean status */
3840 if (adev->gfx.mec.mqd_backup[mqd_idx])
3841 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));

		/* reset ring buffer */
3844 ring->wptr = 0;
3845 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
3846 amdgpu_ring_clear_ring(ring);
3847 } else {
3848 amdgpu_ring_clear_ring(ring);
3849 }
3850
3851 return 0;
3852}
3853
3854static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3855{
3856 struct amdgpu_ring *ring;
3857 int r;
3858
3859 ring = &adev->gfx.kiq.ring;
3860
3861 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3862 if (unlikely(r != 0))
3863 return r;
3864
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}
3868
3869 gfx_v9_0_kiq_init_queue(ring);
3870 amdgpu_bo_kunmap(ring->mqd_obj);
3871 ring->mqd_ptr = NULL;
3872 amdgpu_bo_unreserve(ring->mqd_obj);
3873 ring->sched.ready = true;
3874 return 0;
3875}
3876
3877static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3878{
3879 struct amdgpu_ring *ring = NULL;
3880 int r = 0, i;
3881
3882 gfx_v9_0_cp_compute_enable(adev, true);
3883
3884 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3885 ring = &adev->gfx.compute_ring[i];
3886
3887 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3888 if (unlikely(r != 0))
3889 goto done;
3890 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3891 if (!r) {
3892 r = gfx_v9_0_kcq_init_queue(ring);
3893 amdgpu_bo_kunmap(ring->mqd_obj);
3894 ring->mqd_ptr = NULL;
3895 }
3896 amdgpu_bo_unreserve(ring->mqd_obj);
3897 if (r)
3898 goto done;
3899 }
3900
3901 r = amdgpu_gfx_enable_kcq(adev);
3902done:
3903 return r;
3904}
3905
3906static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3907{
3908 int r, i;
3909 struct amdgpu_ring *ring;
3910
3911 if (!(adev->flags & AMD_IS_APU))
3912 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3913
3914 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3915 if (adev->gfx.num_gfx_rings) {
			/* legacy firmware loading */
3917 r = gfx_v9_0_cp_gfx_load_microcode(adev);
3918 if (r)
3919 return r;
3920 }
3921
3922 r = gfx_v9_0_cp_compute_load_microcode(adev);
3923 if (r)
3924 return r;
3925 }
3926
3927 r = gfx_v9_0_kiq_resume(adev);
3928 if (r)
3929 return r;
3930
3931 if (adev->gfx.num_gfx_rings) {
3932 r = gfx_v9_0_cp_gfx_resume(adev);
3933 if (r)
3934 return r;
3935 }
3936
3937 r = gfx_v9_0_kcq_resume(adev);
3938 if (r)
3939 return r;
3940
3941 if (adev->gfx.num_gfx_rings) {
3942 ring = &adev->gfx.gfx_ring[0];
3943 r = amdgpu_ring_test_helper(ring);
3944 if (r)
3945 return r;
3946 }
3947
3948 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3949 ring = &adev->gfx.compute_ring[i];
3950 amdgpu_ring_test_helper(ring);
3951 }
3952
3953 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3954
3955 return 0;
3956}
3957
3958static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3959{
3960 u32 tmp;
3961
3962 if (adev->asic_type != CHIP_ARCTURUS &&
3963 adev->asic_type != CHIP_ALDEBARAN)
3964 return;
3965
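	/* mirror the DF hash configuration into the TCP address config */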
3966 tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
3967 tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
3968 adev->df.hash_status.hash_64k);
3969 tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
3970 adev->df.hash_status.hash_2m);
3971 tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
3972 adev->df.hash_status.hash_1g);
3973 WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
3974}
3975
3976static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3977{
3978 if (adev->gfx.num_gfx_rings)
3979 gfx_v9_0_cp_gfx_enable(adev, enable);
3980 gfx_v9_0_cp_compute_enable(adev, enable);
3981}
3982
3983static int gfx_v9_0_hw_init(void *handle)
3984{
3985 int r;
3986 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3987
3988 if (!amdgpu_sriov_vf(adev))
3989 gfx_v9_0_init_golden_registers(adev);
3990
3991 gfx_v9_0_constants_init(adev);
3992
3993 gfx_v9_0_init_tcp_config(adev);
3994
3995 r = adev->gfx.rlc.funcs->resume(adev);
3996 if (r)
3997 return r;
3998
3999 r = gfx_v9_0_cp_resume(adev);
4000 if (r)
4001 return r;
4002
4003 if (adev->asic_type == CHIP_ALDEBARAN)
4004 gfx_v9_4_2_set_power_brake_sequence(adev);
4005
4006 return r;
4007}
4008
4009static int gfx_v9_0_hw_fini(void *handle)
4010{
4011 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4012
4013 amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4014 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4015 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4016
	/* DF freeze and kcq disable will fail */
	if (!amdgpu_ras_intr_triggered())
		/* disable KCQ to avoid CPC touching memory that is no longer valid */
4020 amdgpu_gfx_disable_kcq(adev);
4021
4022 if (amdgpu_sriov_vf(adev)) {
4023 gfx_v9_0_cp_gfx_enable(adev, false);
		/* must disable polling for SRIOV when hw finished, otherwise
		 * the CPC engine may still fetch a WB address that is already
		 * invalid after sw teardown, triggering a DMAR read error on
		 * the hypervisor side.
		 */
4029 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4030 return 0;
4031 }
4032
	/* Use the deinitialize sequence from CAIL when unbinding the device
	 * for gfx, otherwise the KIQ hangs when binding back.
	 */
4036 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4037 mutex_lock(&adev->srbm_mutex);
4038 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
4039 adev->gfx.kiq.ring.pipe,
4040 adev->gfx.kiq.ring.queue, 0);
4041 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
4042 soc15_grbm_select(adev, 0, 0, 0, 0);
4043 mutex_unlock(&adev->srbm_mutex);
4044 }
4045
4046 gfx_v9_0_cp_enable(adev, false);

	/* with an A+A (XGMI connected-to-CPU) reset in flight, leave the RLC running */
4049 if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
4050 dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
4051 return 0;
4052 }
4053
4054 adev->gfx.rlc.funcs->stop(adev);
4055 return 0;
4056}
4057
4058static int gfx_v9_0_suspend(void *handle)
4059{
4060 return gfx_v9_0_hw_fini(handle);
4061}
4062
4063static int gfx_v9_0_resume(void *handle)
4064{
4065 return gfx_v9_0_hw_init(handle);
4066}
4067
4068static bool gfx_v9_0_is_idle(void *handle)
4069{
4070 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4071
4072 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
4073 GRBM_STATUS, GUI_ACTIVE))
4074 return false;
4075 else
4076 return true;
4077}
4078
4079static int gfx_v9_0_wait_for_idle(void *handle)
4080{
4081 unsigned i;
4082 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4083
4084 for (i = 0; i < adev->usec_timeout; i++) {
4085 if (gfx_v9_0_is_idle(handle))
4086 return 0;
4087 udelay(1);
4088 }
4089 return -ETIMEDOUT;
4090}
4091
4092static int gfx_v9_0_soft_reset(void *handle)
4093{
4094 u32 grbm_soft_reset = 0;
4095 u32 tmp;
4096 struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GRBM_STATUS */
4099 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4100 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4101 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4102 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4103 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4104 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4105 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4106 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4107 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4108 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4109 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4110 }
4111
4112 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4113 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4114 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4115 }

	/* GRBM_STATUS2 */
4118 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4119 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4120 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4121 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4122
4123
	if (grbm_soft_reset) {
		/* stop the rlc */
4126 adev->gfx.rlc.funcs->stop(adev);
4127
		if (adev->gfx.num_gfx_rings)
			/* Disable GFX parsing/prefetching */
			gfx_v9_0_cp_gfx_enable(adev, false);

		/* Disable MEC parsing/prefetching */
		gfx_v9_0_cp_compute_enable(adev, false);
4134
4135 if (grbm_soft_reset) {
4136 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4137 tmp |= grbm_soft_reset;
4138 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4139 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4140 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4141
4142 udelay(50);
4143
4144 tmp &= ~grbm_soft_reset;
4145 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4146 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4147 }

		/* Wait a little for things to settle down */
		udelay(50);
4151 }
4152 return 0;
4153}
4154
4155static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4156{
4157 signed long r, cnt = 0;
4158 unsigned long flags;
4159 uint32_t seq, reg_val_offs = 0;
4160 uint64_t value = 0;
4161 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4162 struct amdgpu_ring *ring = &kiq->ring;
4163
4164 BUG_ON(!ring->funcs->emit_rreg);
4165
4166 spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4168 pr_err("critical bug! too many kiq readers\n");
4169 goto failed_unlock;
4170 }
4171 amdgpu_ring_alloc(ring, 32);
4172 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 9 |	/* src: gpu clock count */
				(5 << 8) |	/* dst: memory */
				(1 << 16) |	/* count sel: 64-bit */
				(1 << 20));	/* write confirm */
4177 amdgpu_ring_write(ring, 0);
4178 amdgpu_ring_write(ring, 0);
4179 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4180 reg_val_offs * 4));
4181 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4182 reg_val_offs * 4));
4183 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4184 if (r)
4185 goto failed_undo;
4186
4187 amdgpu_ring_commit(ring);
4188 spin_unlock_irqrestore(&kiq->ring_lock, flags);
4189
4190 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait any longer in the GPU-reset case: blocking here could
	 * stall gpu_recover() forever, e.g. when this read is triggered from
	 * TTM and ttm_bo_lock_delayed_workqueue() never returns.
	 */
4200 if (r < 1 && (amdgpu_in_reset(adev)))
4201 goto failed_kiq_read;
4202
4203 might_sleep();
4204 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4205 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4206 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4207 }
4208
4209 if (cnt > MAX_KIQ_REG_TRY)
4210 goto failed_kiq_read;
4211
4212 mb();
4213 value = (uint64_t)adev->wb.wb[reg_val_offs] |
		(uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
4215 amdgpu_device_wb_free(adev, reg_val_offs);
4216 return value;
4217
4218failed_undo:
4219 amdgpu_ring_undo(ring);
4220failed_unlock:
4221 spin_unlock_irqrestore(&kiq->ring_lock, flags);
4222failed_kiq_read:
4223 if (reg_val_offs)
4224 amdgpu_device_wb_free(adev, reg_val_offs);
4225 pr_err("failed to read gpu clock\n");
4226 return ~0;
4227}
4228
4229static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4230{
4231 uint64_t clock;
4232
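	/* keep GFX powered up (gfxoff disabled) while reading the RLC clock counter */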
4233 amdgpu_gfx_off_ctrl(adev, false);
4234 mutex_lock(&adev->gfx.gpu_clock_mutex);
4235 if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
4236 clock = gfx_v9_0_kiq_read_clock(adev);
4237 } else {
4238 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4239 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4240 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4241 }
4242 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4243 amdgpu_gfx_off_ctrl(adev, true);
4244 return clock;
4245}
4246
4247static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4248 uint32_t vmid,
4249 uint32_t gds_base, uint32_t gds_size,
4250 uint32_t gws_base, uint32_t gws_size,
4251 uint32_t oa_base, uint32_t oa_size)
4252{
4253 struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
4256 gfx_v9_0_write_data_to_reg(ring, 0, false,
4257 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4258 gds_base);

	/* GDS Size */
4261 gfx_v9_0_write_data_to_reg(ring, 0, false,
4262 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4263 gds_size);

	/* GWS */
4266 gfx_v9_0_write_data_to_reg(ring, 0, false,
4267 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4268 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
4271 gfx_v9_0_write_data_to_reg(ring, 0, false,
4272 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4273 (1 << (oa_size + oa_base)) - (1 << oa_base));
4274}
4275
4276static const u32 vgpr_init_compute_shader[] =
4277{
4278 0xb07c0000, 0xbe8000ff,
4279 0x000000f8, 0xbf110800,
4280 0x7e000280, 0x7e020280,
4281 0x7e040280, 0x7e060280,
4282 0x7e080280, 0x7e0a0280,
4283 0x7e0c0280, 0x7e0e0280,
4284 0x80808800, 0xbe803200,
4285 0xbf84fff5, 0xbf9c0000,
4286 0xd28c0001, 0x0001007f,
4287 0xd28d0001, 0x0002027e,
4288 0x10020288, 0xb8810904,
4289 0xb7814000, 0xd1196a01,
4290 0x00000301, 0xbe800087,
4291 0xbefc00c1, 0xd89c4000,
4292 0x00020201, 0xd89cc080,
4293 0x00040401, 0x320202ff,
4294 0x00000800, 0x80808100,
4295 0xbf84fff8, 0x7e020280,
4296 0xbf810000, 0x00000000,
4297};
4298
4299static const u32 sgpr_init_compute_shader[] =
4300{
4301 0xb07c0000, 0xbe8000ff,
4302 0x0000005f, 0xbee50080,
4303 0xbe812c65, 0xbe822c65,
4304 0xbe832c65, 0xbe842c65,
4305 0xbe852c65, 0xb77c0005,
4306 0x80808500, 0xbf84fff8,
4307 0xbe800080, 0xbf810000,
4308};
4309
4310static const u32 vgpr_init_compute_shader_arcturus[] = {
4311 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4312 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4313 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4314 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4315 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4316 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4317 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4318 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4319 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4320 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4321 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4322 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4323 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4324 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4325 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4326 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4327 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4328 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4329 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4330 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4331 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4332 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4333 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4334 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4335 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4336 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4337 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4338 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4339 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4340 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4341 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4342 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4343 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4344 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4345 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4346 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4347 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4348 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4349 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4350 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4351 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4352 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4353 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4354 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4355 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4356 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4357 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4358 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4359 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4360 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4361 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4362 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4363 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4364 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4365 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4366 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4367 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4368 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4369 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4370 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4371 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4372 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4373 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4374 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4375 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4376 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4377 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4378 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4379 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4380 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4381 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4382 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4383 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4384 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4385 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4386 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4387 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4388 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4389 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4390 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4391 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4392 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4393 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4394 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4395 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4396 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4397 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4398 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4399 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4400 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4401 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4402 0xbf84fff8, 0xbf810000,
4403};
4404
/* When the register arrays below change, please also update gpr_reg_size in
 * gfx_v9_0_do_edc_gpr_workarounds() so that all gfx9 ASICs stay covered.
 */
4408static const struct soc15_reg_entry vgpr_init_regs[] = {
4409 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4410 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4411 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4412 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4413 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4414 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },
4415 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4416 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4417 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4418 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4419 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4420 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4421 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4422 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4423};
4424
4425static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4426 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4427 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4428 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4429 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4430 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4431 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },
4432 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4433 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4434 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4435 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4436 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4437 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4438 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4439 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4440};
4441
4442static const struct soc15_reg_entry sgpr1_init_regs[] = {
4443 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4444 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4445 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4446 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4447 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 },
4448 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4449 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4450 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4451 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4452 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4453 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4454 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4455 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4456 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4457};
4458
4459static const struct soc15_reg_entry sgpr2_init_regs[] = {
4460 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4461 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4462 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4463 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4464 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 },
4465 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4466 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4467 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4468 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4469 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4470 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4471 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4472 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4473 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4474};
4475
4476static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4477 { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4478 { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4479 { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4480 { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4481 { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4482 { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4483 { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4484 { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4485 { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4486 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4487 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4488 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4489 { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4490 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4491 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4492 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4493 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4494 { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4495 { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4496 { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4497 { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4498 { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4499 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4500 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4501 { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4502 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4503 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4504 { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4505 { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4506 { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4507 { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4508 { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4509 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4510};
4511
4512static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4513{
4514 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4515 int i, r;

	/* only support when RAS is enabled */
4518 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4519 return 0;
4520
4521 r = amdgpu_ring_alloc(ring, 7);
4522 if (r) {
4523 DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4524 ring->name, r);
4525 return r;
4526 }
4527
4528 WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4529 WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4530
4531 amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4532 amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4533 PACKET3_DMA_DATA_DST_SEL(1) |
4534 PACKET3_DMA_DATA_SRC_SEL(2) |
4535 PACKET3_DMA_DATA_ENGINE(0)));
4536 amdgpu_ring_write(ring, 0);
4537 amdgpu_ring_write(ring, 0);
4538 amdgpu_ring_write(ring, 0);
4539 amdgpu_ring_write(ring, 0);
4540 amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4541 adev->gds.gds_size);
4542
4543 amdgpu_ring_commit(ring);
4544
4545 for (i = 0; i < adev->usec_timeout; i++) {
4546 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4547 break;
4548 udelay(1);
4549 }
4550
4551 if (i >= adev->usec_timeout)
4552 r = -ETIMEDOUT;
4553
4554 WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4555
4556 return r;
4557}
4558
4559static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4560{
4561 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4562 struct amdgpu_ib ib;
4563 struct dma_fence *f = NULL;
4564 int r, i;
4565 unsigned total_size, vgpr_offset, sgpr_offset;
4566 u64 gpu_addr;
4567
4568 int compute_dim_x = adev->gfx.config.max_shader_engines *
4569 adev->gfx.config.max_cu_per_sh *
4570 adev->gfx.config.max_sh_per_se;
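	/* compute_dim_x is the total number of CUs across all shader engines */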
4571 int sgpr_work_group_size = 5;
4572 int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4573 int vgpr_init_shader_size;
4574 const u32 *vgpr_init_shader_ptr;
4575 const struct soc15_reg_entry *vgpr_init_regs_ptr;

	/* only support when RAS is enabled */
4578 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4579 return 0;

	/* bail if the compute ring is not ready */
4582 if (!ring->sched.ready)
4583 return 0;
4584
4585 if (adev->asic_type == CHIP_ARCTURUS) {
4586 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4587 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4588 vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4589 } else {
4590 vgpr_init_shader_ptr = vgpr_init_compute_shader;
4591 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4592 vgpr_init_regs_ptr = vgpr_init_regs;
4593 }
4594
	total_size =
		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
	total_size +=
		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
	total_size +=
		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4601 total_size = ALIGN(total_size, 256);
4602 vgpr_offset = total_size;
4603 total_size += ALIGN(vgpr_init_shader_size, 256);
4604 sgpr_offset = total_size;
4605 total_size += sizeof(sgpr_init_compute_shader);

	/* allocate an indirect buffer to put the commands in */
4608 memset(&ib, 0, sizeof(ib));
4609 r = amdgpu_ib_get(adev, NULL, total_size,
4610 AMDGPU_IB_POOL_DIRECT, &ib);
4611 if (r) {
4612 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4613 return r;
4614 }

	/* load the compute shaders */
4617 for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4618 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4619
4620 for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4621 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];

	/* init the ib length to 0 */
	ib.length_dw = 0;

	/* VGPR */
	/* write the register state for the compute dispatch */
4628 for (i = 0; i < gpr_reg_size; i++) {
4629 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4630 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4631 - PACKET3_SET_SH_REG_START;
4632 ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4633 }

	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4635 gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4636 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4637 ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4638 - PACKET3_SET_SH_REG_START;
4639 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4640 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
4643 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4644 ib.ptr[ib.length_dw++] = compute_dim_x * 2;
4645 ib.ptr[ib.length_dw++] = 1;
4646 ib.ptr[ib.length_dw++] = 1;
4647 ib.ptr[ib.length_dw++] =
4648 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
4651 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4652 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR1 */
	/* write the register state for the compute dispatch */
4656 for (i = 0; i < gpr_reg_size; i++) {
4657 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4658 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4659 - PACKET3_SET_SH_REG_START;
4660 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4661 }

	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4663 gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4664 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4665 ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4666 - PACKET3_SET_SH_REG_START;
4667 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4668 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
4671 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4672 ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size;
4673 ib.ptr[ib.length_dw++] = 1;
4674 ib.ptr[ib.length_dw++] = 1;
4675 ib.ptr[ib.length_dw++] =
4676 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
4679 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4680 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR2 */
	/* write the register state for the compute dispatch */
4684 for (i = 0; i < gpr_reg_size; i++) {
4685 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4686 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4687 - PACKET3_SET_SH_REG_START;
4688 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4689 }

	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4691 gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4692 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4693 ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4694 - PACKET3_SET_SH_REG_START;
4695 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4696 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
4699 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4700 ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size;
4701 ib.ptr[ib.length_dw++] = 1;
4702 ib.ptr[ib.length_dw++] = 1;
4703 ib.ptr[ib.length_dw++] =
4704 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
4707 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4708 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* schedule the IB on the ring */
4711 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4712 if (r) {
4713 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4714 goto fail;
4715 }

	/* wait for the GPU to finish processing the IB */
4718 r = dma_fence_wait(f, false);
4719 if (r) {
4720 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4721 goto fail;
4722 }
4723
4724fail:
4725 amdgpu_ib_free(adev, &ib, NULL);
4726 dma_fence_put(f);
4727
4728 return r;
4729}
4730
4731static int gfx_v9_0_early_init(void *handle)
4732{
4733 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4734
4735 if (adev->asic_type == CHIP_ARCTURUS ||
4736 adev->asic_type == CHIP_ALDEBARAN)
4737 adev->gfx.num_gfx_rings = 0;
4738 else
4739 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4740 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4741 AMDGPU_MAX_COMPUTE_RINGS);
4742 gfx_v9_0_set_kiq_pm4_funcs(adev);
4743 gfx_v9_0_set_ring_funcs(adev);
4744 gfx_v9_0_set_irq_funcs(adev);
4745 gfx_v9_0_set_gds_init(adev);
4746 gfx_v9_0_set_rlc_funcs(adev);
4747
4748 return 0;
4749}
4750
4751static int gfx_v9_0_ecc_late_init(void *handle)
4752{
4753 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4754 int r;

	/*
	 * Temporary workaround: on several boards the CP firmware fails to
	 * update the read pointer when CPDMA writes a clearing operation to
	 * GDS during the suspend/resume sequence, so limit this operation
	 * to the cold-boot path.
	 */
4762 if ((!adev->in_suspend) &&
4763 (adev->gds.gds_size)) {
4764 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4765 if (r)
4766 return r;
4767 }

	/* requires IBs so do in late init after IB pool is initialized */
4770 if (adev->asic_type == CHIP_ALDEBARAN)
4771 r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
4772 else
4773 r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4774
4775 if (r)
4776 return r;
4777
4778 if (adev->gfx.ras_funcs &&
4779 adev->gfx.ras_funcs->ras_late_init) {
4780 r = adev->gfx.ras_funcs->ras_late_init(adev);
4781 if (r)
4782 return r;
4783 }
4784
4785 if (adev->gfx.ras_funcs &&
4786 adev->gfx.ras_funcs->enable_watchdog_timer)
4787 adev->gfx.ras_funcs->enable_watchdog_timer(adev);
4788
4789 return 0;
4790}

static int gfx_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	r = gfx_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return 0;
}

static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
}
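/*
 * Safe-mode handshake: the driver writes RLC_SAFE_MODE with CMD set and
 * MESSAGE = 1 to ask the RLC firmware to pause, then polls until the
 * firmware acknowledges by clearing CMD. For reference, REG_GET_FIELD()
 * expands to roughly
 *
 *	(reg & RLC_SAFE_MODE__CMD_MASK) >> RLC_SAFE_MODE__CMD__SHIFT
 *
 * for the CMD field. Leaving safe mode writes CMD again with MESSAGE = 0.
 */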

static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev);

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
	} else {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
			gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	/* TODO: double check if we need to perform under safe mode */
	/* gfx_v9_0_enter_rlc_safe_mode(adev); */

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);

	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
}

static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;

	amdgpu_gfx_rlc_enter_safe_mode(adev);

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);

		if (adev->asic_type != CHIP_VEGA12)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;

		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		/* only for Vega10 & Raven1 */
		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* MGLS is a global flag to control all MGLS in GFX */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			/* 2 - RLC memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
			}
			/* 3 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
			}
		}
	} else {
		/* 1 - MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);

		if (adev->asic_type != CHIP_VEGA12)
			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;

		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
		}
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t data, def;

	if (!adev->gfx.num_gfx_rings)
		return;

	amdgpu_gfx_rlc_enter_safe_mode(adev);

	/* Enable 3D CGCG/CGLS */
	if (enable) {
		/* write cmd to clear cgcg/cgls ov */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable 3Dcgcg FSM(0x0000363f) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
			data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		else
			data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		/* Disable CGCG/CGLS */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
		/* disable cgcg, cgls should be disabled */
		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	amdgpu_gfx_rlc_enter_safe_mode(adev);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		else
			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable cgcg FSM(0x0000363F) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);

		if (adev->asic_type == CHIP_ARCTURUS)
			data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		else
			data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
		/* reset CGCG/CGLS bits */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	if (enable) {
		/* CGCG/CGLS should be enabled after MGCG/MGLS
		 * ===  MGCG + MGLS ===
		 */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
		/* ===  CGCG/CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  CGCG + CGLS === */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS
		 * ===  CGCG + CGLS ===
		 */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
		/* ===  CGCG/CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  MGCG + MGLS === */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
	}
	return 0;
}
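/*
 * The enable and disable paths are deliberate mirror images: coarse-grain
 * gating (CGCG/CGLS) is layered on top of medium-grain gating (MGCG/MGLS),
 * so MGCG comes up first when gating is enabled and goes down last when it
 * is disabled, keeping the coarse-grain FSM from running without its
 * medium-grain prerequisites.
 */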

static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
	u32 reg, data;

	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev))
		data = RREG32_NO_KIQ(reg);
	else
		data = RREG32(reg);

	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (amdgpu_sriov_is_pp_one_vf(adev))
		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
	else
		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}

static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
					uint32_t offset,
					struct soc15_reg_rlcg *entries, int arr_size)
{
	int i;
	uint32_t reg;

	if (!entries)
		return false;

	for (i = 0; i < arr_size; i++) {
		const struct soc15_reg_rlcg *entry;

		entry = &entries[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
		if (offset == reg)
			return true;
	}

	return false;
}

static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
	return gfx_v9_0_check_rlcg_range(adev, offset,
					(void *)rlcg_access_gc_9_0,
					ARRAY_SIZE(rlcg_access_gc_9_0));
}
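/*
 * RLC_SPM_MC_CNTL selects the VMID the RLC streaming performance monitor
 * (SPM) uses for its memory writes. Under SR-IOV with a single VF owning
 * the powerplay interface, the register is accessed directly rather than
 * being proxied through the KIQ, hence the _NO_KIQ accessors.
 */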

static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
	.set_safe_mode = gfx_v9_0_set_safe_mode,
	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
	.init = gfx_v9_0_rlc_init,
	.get_csb_size = gfx_v9_0_get_csb_size,
	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
	.resume = gfx_v9_0_rlc_resume,
	.stop = gfx_v9_0_rlc_stop,
	.reset = gfx_v9_0_rlc_reset,
	.start = gfx_v9_0_rlc_start,
	.update_spm_vmid = gfx_v9_0_update_spm_vmid,
	.sriov_wreg = gfx_v9_0_sriov_wreg,
	.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};

static int gfx_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		if (!enable)
			amdgpu_gfx_off_ctrl(adev, false);

		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
		} else {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
			gfx_v9_0_enable_cp_power_gating(adev, true);
		else
			gfx_v9_0_enable_cp_power_gating(adev, false);

		/* update gfx cgpg state */
		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);

		/* update mgcg state */
		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);

		if (enable)
			amdgpu_gfx_off_ctrl(adev, true);
		break;
	case CHIP_VEGA12:
		amdgpu_gfx_off_ctrl(adev, enable);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
	case CHIP_ALDEBARAN:
		gfx_v9_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;

	if (adev->asic_type != CHIP_ARCTURUS) {
		/* AMD_CG_SUPPORT_GFX_3D_CGCG */
		data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
		if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
			*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

		/* AMD_CG_SUPPORT_GFX_3D_CGLS */
		if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
			*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
	}
}

static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
	} else {
		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}

static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
	}
}
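/*
 * With doorbells the wptr is mirrored into a 64-bit writeback slot
 * (atomic64 so a concurrent reader never sees a torn value) and then rung
 * through the doorbell aperture, letting the CP pick it up without an MMIO
 * register write; the fallback path splits the 64-bit wptr across
 * CP_RB0_WPTR and CP_RB0_WPTR_HI.
 */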

static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1;
	}

	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			      ref_and_mask, ref_and_mask, 0x20);
}

static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
			gfx_v9_0_ring_emit_de_meta(ring);
	}

	amdgpu_ring_write(ring, header);
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}
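/*
 * Layout of the INDIRECT_BUFFER packet emitted above: header, IB address
 * lo (with a swap hint on big-endian hosts), IB address hi, then a control
 * dword packing the IB length in dwords plus the VMID in bits 31:24. CE
 * IBs use the _CONST variant so they are fetched by the constant engine
 * rather than the drawing engine.
 */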

static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_job *job,
					  struct amdgpu_ib *ib,
					  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	/*
	 * If requested, restore mmGDS_COMPUTE_MAX_WAVE_ID to the device
	 * default before launching the IB, so that a value left behind by a
	 * previous submission cannot constrain GDS usage for this one.
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
					       EOP_TC_NC_ACTION_EN) :
					      (EOP_TCL1_ACTION_EN |
					       EOP_TC_ACTION_EN |
					       EOP_TC_WB_ACTION_EN |
					       EOP_TC_MD_ACTION_EN)) |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
	 * the address should be Qword aligned if 64bit write, Dword
	 * aligned if only send 32bit data
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}
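/*
 * RELEASE_MEM is the end-of-pipe fence packet: the first payload dword
 * selects the cache actions (TC writeback only for
 * AMDGPU_FENCE_FLAG_TC_WB_ONLY, otherwise a full TCL1/TC flush and
 * invalidate) tied to the CACHE_FLUSH_AND_INV_TS timestamp event, while
 * DATA_SEL picks a 32- or 64-bit fence value and INT_SEL(2) asks the CP to
 * raise an interrupt once the write has been confirmed.
 */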

static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
			      lower_32_bits(addr), upper_32_bits(addr),
			      seq, 0xffffffff, 4);
}

static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}

static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}

static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	struct v9_ce_ib_state ce_payload = {0};
	uint64_t csa_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
	csa_addr = amdgpu_csa_vaddr(ring->adev);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
}

static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}

static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
					  bool secure)
{
	uint32_t v = secure ? FRAME_TMZ : 0;

	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}

static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time a preamble is
		 * presented, even though no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following commands if 0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}

static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
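/*
 * Worked example of the COND_EXEC patching: init_cond_exec() emits a
 * COND_EXEC packet whose dword count is the 0x55aa55aa placeholder and
 * returns its ring offset. If the placeholder sits at offset 10 and the
 * wptr has advanced to 21 when patch_cond_exec() runs, cur = 20 and the
 * placeholder becomes 10, i.e. "skip the next 10 dwords when the predicate
 * at cond_exe_gpu_addr reads 0". The else branch handles the case where
 * the write pointer has wrapped around the ring buffer.
 */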

static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}

static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						  uint32_t reg0, uint32_t reg1,
						  uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	struct amdgpu_device *adev = ring->adev;
	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
			      adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;

	if (fw_version_ok)
		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
				      ref, mask, 0x20);
	else
		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
							   ref, mask);
}

static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
}
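/*
 * SQ_CMD field meanings, per the values used for soft recovery across
 * amdgpu: CMD 0x03 is a wave kill, MODE 0x01 broadcasts it to all wave
 * slots, and CHECK_VMID/VM_ID limit the kill to waves owned by the hung
 * job's VMID, making this a targeted recovery that avoids a full GPU
 * reset.
 */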

static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}
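/*
 * The enable/disable arms above are plain read-modify-writes;
 * REG_SET_FIELD(v, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, x) is
 * roughly (v & ~MASK) | (x << SHIFT). The PIPE0 field description is
 * reused for every pipe because all CP_ME1_PIPE*_INT_CNTL registers share
 * one layout.
 */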

static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

#define ENABLE_ECC_ON_ME_PIPE(me, pipe)				\
	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
			CP_ECC_ERROR_INT_ENABLE, 1)

#define DISABLE_ECC_ON_ME_PIPE(me, pipe)			\
	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
			CP_ECC_ERROR_INT_ENABLE, 0)

static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned type,
					   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       CP_ECC_ERROR_INT_ENABLE, 0);
		DISABLE_ECC_ON_ME_PIPE(1, 0);
		DISABLE_ECC_ON_ME_PIPE(1, 1);
		DISABLE_ECC_ON_ME_PIPE(1, 2);
		DISABLE_ECC_ON_ME_PIPE(1, 3);
		break;

	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       CP_ECC_ERROR_INT_ENABLE, 1);
		ENABLE_ECC_ON_ME_PIPE(1, 0);
		ENABLE_ECC_ON_ME_PIPE(1, 1);
		ENABLE_ECC_ON_ME_PIPE(1, 2);
		ENABLE_ECC_ON_ME_PIPE(1, 3);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead
			 * of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
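/*
 * Worked example of the ring_id decode: the IV ring_id packs the queue in
 * bits 6:4, the ME in bits 3:2 and the pipe in bits 1:0, so a ring_id of
 * 0x25 (0b0100101) decodes to queue_id 2, me_id 1, pipe_id 1, and the loop
 * signals exactly the one compute ring matching that triple.
 */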

static void gfx_v9_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}

static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v9_0_fault(adev, entry);
	return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v9_0_fault(adev, entry);
	return 0;
}

static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
	},
	{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
	},
	{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
	  0, 0
	},
	{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
	  0, 0
	},
	{ "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
	},
	{ "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
	  0, 0
	},
	{ "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
	},
	{ "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
	},
	{ "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
	  0, 0
	},
	{ "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
	  0, 0
	},
	{ "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
	  0, 0
	},
	{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
	},
	{ "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
	  0, 0
	},
	{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
	},
	{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
	},
	{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
	  0, 0
	},
	{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
	},
	{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
	},
	{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
	},
	{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
	},
	{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
	  0, 0
	},
	{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
	},
	{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
	  0, 0
	},
	{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
	  0, 0
	},
	{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
	  0, 0
	},
	{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
	  0, 0
	},
	{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
	  0, 0
	},
	{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
	  0, 0
	},
	{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
	},
	{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
	},
	{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
	},
	{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
	},
	{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
	},
	{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
	  0, 0
	},
	{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
	  0, 0
	},
	{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
	  0, 0
	},
	{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
	  0, 0
	},
	{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
	  0, 0
	},
	{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
	  SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
	  0, 0
	},
	{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
	  0, 0
	},
	{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
	  0, 0
	},
	{ "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
	  SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
	  0, 0
	},
	{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
	  SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
	  0, 0
	},
	{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
	  0, 0
	},
	{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
	  0, 0
	},
	{ "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
	  SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
	  0, 0
	},
	{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
	  0, 0
	},
	{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
	},
	{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
	},
	{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
	  0, 0
	},
	{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
	  0, 0
	},
	{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
	  0, 0
	},
	{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
	},
	{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
	},
	{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
	},
	{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
	},
	{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
	  0, 0
	},
	{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
	},
	{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
	},
	{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
	},
	{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
	},
	{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
	},
	{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
	},
	{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
	},
	{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
	},
	{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
	},
	{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
	},
	{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
	},
	{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
	},
	{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
	},
	{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
	},
	{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
	},
	{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
	},
	{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
	},
	{ "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
	  0, 0
	},
	{ "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
	  0, 0
	},
	{ "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
	  0, 0
	},
	{ "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
	  0, 0
	},
	{ "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
	  0, 0
	},
	{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
	},
	{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
	},
	{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
	},
	{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
	},
	{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
	},
	{ "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
	  0, 0
	},
	{ "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
	  0, 0
	},
	{ "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
	  0, 0
	},
	{ "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
	  0, 0
	},
	{ "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
	  0, 0
	},
	{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
	},
	{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
	},
	{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
	},
	{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
	},
	{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
	},
	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
	  0, 0
	},
	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
	  0, 0
	},
	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
	  0, 0
	},
	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
	  0, 0
	},
	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
	  0, 0
	},
	{ "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
	},
	{ "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
	},
	{ "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
	},
	{ "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
	  0, 0
	},
	{ "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
	  0, 0
	},
	{ "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
	  0, 0
	},
	{ "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
	  0, 0
	},
	{ "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
	  0, 0
	},
	{ "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
	  0, 0
	}
};
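/*
 * Each entry above names one ECC-protected memory, the EDC counter register
 * that reports it, and the SEC (single-error corrected) and DED
 * (double-error detected) bitfields within that register; memories whose
 * hardware only exposes a single-error-detect (SED) count leave the DED
 * field pair as 0, 0.
 */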

static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if)
{
	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
	int ret;
	struct ta_ras_trigger_error_input block_info = { 0 };

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
		return -EINVAL;

	if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
		return -EINVAL;

	if (!ras_gfx_subblocks[info->head.sub_block_index].name)
		return -EPERM;

	if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
	      info->head.type)) {
		DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
			  ras_gfx_subblocks[info->head.sub_block_index].name,
			  info->head.type);
		return -EPERM;
	}

	if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
	      info->head.type)) {
		DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
			  ras_gfx_subblocks[info->head.sub_block_index].name,
			  info->head.type);
		return -EPERM;
	}

	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
	block_info.sub_block_index =
		ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
	block_info.address = info->address;
	block_info.value = info->value;

	mutex_lock(&adev->grbm_idx_mutex);
	ret = psp_ras_trigger_error(&adev->psp, &block_info);
	mutex_unlock(&adev->grbm_idx_mutex);

	return ret;
}

static const char *vml2_mems[] = {
	"UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
	"UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
	"UTC_VML2_BANK_CACHE_0_4K_MEM0",
	"UTC_VML2_BANK_CACHE_0_4K_MEM1",
	"UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
	"UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
	"UTC_VML2_BANK_CACHE_1_4K_MEM0",
	"UTC_VML2_BANK_CACHE_1_4K_MEM1",
	"UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
	"UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
	"UTC_VML2_BANK_CACHE_2_4K_MEM0",
	"UTC_VML2_BANK_CACHE_2_4K_MEM1",
	"UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
	"UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
	"UTC_VML2_BANK_CACHE_3_4K_MEM0",
	"UTC_VML2_BANK_CACHE_3_4K_MEM1",
};

static const char *vml2_walker_mems[] = {
	"UTC_VML2_CACHE_PDE0_MEM0",
	"UTC_VML2_CACHE_PDE0_MEM1",
	"UTC_VML2_CACHE_PDE1_MEM0",
	"UTC_VML2_CACHE_PDE1_MEM1",
	"UTC_VML2_CACHE_PDE2_MEM0",
	"UTC_VML2_CACHE_PDE2_MEM1",
	"UTC_VML2_RDIF_LOG_FIFO",
};

static const char *atc_l2_cache_2m_mems[] = {
	"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
	"UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
	"UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
	"UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
};

static const char *atc_l2_cache_4k_mems[] = {
	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
};
6525
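/*
 * Walk the VML2, VML2 walker and ATC L2 EDC counters, logging and
 * accumulating correctable (SEC) and uncorrectable (DED) error counts
 * into err_data.
 */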
static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
					 struct ras_err_data *err_data)
{
	uint32_t i, data;
	uint32_t sec_count, ded_count;

	/* reset the EDC index selects and clear the counters */
	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);

	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
		data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);

		sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
		if (sec_count) {
			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
				 "SEC %d\n", i, vml2_mems[i], sec_count);
			err_data->ce_count += sec_count;
		}

		ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
		if (ded_count) {
			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
				 "DED %d\n", i, vml2_mems[i], ded_count);
			err_data->ue_count += ded_count;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
		data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);

		sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
					  SEC_COUNT);
		if (sec_count) {
			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
				 "SEC %d\n", i, vml2_walker_mems[i], sec_count);
			err_data->ce_count += sec_count;
		}

		ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
					  DED_COUNT);
		if (ded_count) {
			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
				 "DED %d\n", i, vml2_walker_mems[i], ded_count);
			err_data->ue_count += ded_count;
		}
	}

	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);

		/* SEC_COUNT lives in bits [14:13] of the 2M EDC counter */
		sec_count = (data & 0x00006000L) >> 0xd;
		if (sec_count) {
			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
				 "SEC %d\n", i, atc_l2_cache_2m_mems[i],
				 sec_count);
			err_data->ce_count += sec_count;
		}
	}

	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);

		/* SEC_COUNT lives in bits [14:13] of the 4K EDC counter */
		sec_count = (data & 0x00006000L) >> 0xd;
		if (sec_count) {
			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
				 "SEC %d\n", i, atc_l2_cache_4k_mems[i],
				 sec_count);
			err_data->ce_count += sec_count;
		}

		/* DED_COUNT lives in bits [16:15] of the 4K EDC counter */
		ded_count = (data & 0x00018000L) >> 0xf;
		if (ded_count) {
			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
				 "DED %d\n", i, atc_l2_cache_4k_mems[i],
				 ded_count);
			err_data->ue_count += ded_count;
		}
	}

	/* reset the index selects */
	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);

	return 0;
}

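/*
 * Decode one EDC counter value: match the register against the
 * gfx_v9_0_ras_fields table and extract the SEC/DED bit-fields.
 */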
static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
				    const struct soc15_reg_entry *reg,
				    uint32_t se_id, uint32_t inst_id, uint32_t value,
				    uint32_t *sec_count, uint32_t *ded_count)
{
	uint32_t i;
	uint32_t sec_cnt, ded_cnt;

	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
		if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
		    gfx_v9_0_ras_fields[i].seg != reg->seg ||
		    gfx_v9_0_ras_fields[i].inst != reg->inst)
			continue;

		sec_cnt = (value &
			   gfx_v9_0_ras_fields[i].sec_count_mask) >>
			  gfx_v9_0_ras_fields[i].sec_count_shift;
		if (sec_cnt) {
			dev_info(adev->dev, "GFX SubBlock %s, "
				 "Instance[%d][%d], SEC %d\n",
				 gfx_v9_0_ras_fields[i].name,
				 se_id, inst_id,
				 sec_cnt);
			*sec_count += sec_cnt;
		}

		ded_cnt = (value &
			   gfx_v9_0_ras_fields[i].ded_count_mask) >>
			  gfx_v9_0_ras_fields[i].ded_count_shift;
		if (ded_cnt) {
			dev_info(adev->dev, "GFX SubBlock %s, "
				 "Instance[%d][%d], DED %d\n",
				 gfx_v9_0_ras_fields[i].name,
				 se_id, inst_id,
				 ded_cnt);
			*ded_count += ded_cnt;
		}
	}

	return 0;
}

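/* The EDC counters are cleared on read, so reading every counter resets them */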
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
{
	int i, j, k;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
		return;

	/* read back registers to clear the counters */
	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
			}
		}
	}
	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
	mutex_unlock(&adev->grbm_idx_mutex);

	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);

	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
		RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
	}

	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
		RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
	}

	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
	}

	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
	}

	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
}

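/*
 * Read every EDC counter register across all SEs/instances and total the
 * correctable/uncorrectable error counts into ras_error_status.
 */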
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t sec_count = 0, ded_count = 0;
	uint32_t i, j, k;
	uint32_t reg_value;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
		return -EINVAL;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
				gfx_v9_0_select_se_sh(adev, j, 0, k);
				reg_value =
					RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
				if (reg_value)
					gfx_v9_0_ras_error_count(adev,
						&gfx_v9_0_edc_counter_regs[i],
						j, k, reg_value,
						&sec_count, &ded_count);
			}
		}
	}

	err_data->ce_count += sec_count;
	err_data->ue_count += ded_count;

	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_query_utc_edc_status(adev, err_data);

	return 0;
}

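/* Emit an ACQUIRE_MEM packet to flush and invalidate the GPU caches */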
static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int cp_coher_cntl =
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
}

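/*
 * Throttle one compute (CS) pipe: when enable is set, program that pipe's
 * SPI_WCL_PIPE_PERCENT_CS register to the minimum multiplier so the pipe
 * gets only a small share of wave slots; otherwise restore the default.
 */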
static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
					uint32_t pipe, bool enable)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t val;
	uint32_t wcl_cs_reg;

	/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;

	switch (pipe) {
	case 0:
		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
		break;
	case 1:
		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
		break;
	case 2:
		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
		break;
	case 3:
		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
		break;
	default:
		DRM_DEBUG("invalid pipe %d\n", pipe);
		return;
	}

	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
}

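/*
 * Enable or disable wave limiting on the gfx pipe and the sibling compute
 * pipes, so a high-priority compute queue on this ring's pipe gets the
 * best QoS while the limit is enabled.
 */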
static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t val;
	int i;

	/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7 bit multiplier register that
	 * limits the number of gfx waves; programming a small value (0x1f)
	 * while the limit is enabled leaves gfx only a fraction of the
	 * wave slots.
	 */
	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
	amdgpu_ring_emit_wreg(ring,
			      SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
			      val);

	/* Restrict the waves of the other compute (CS) pipes as well; only
	 * the pipes of the first MEC are driver controlled.
	 */
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		if (i != ring->pipe)
			gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
	}
}

static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* worst-case frame size in dwords */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jumps to the place just
		     * prior to this double SWITCH_BUFFER
		     */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2 + /* SWITCH_BUFFER */
		7, /* gfx_v9_0_emit_mem_sync */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v9_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
		7 + /* gfx_v9_0_emit_mem_sync */
		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX */
		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
	.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
	.set = gfx_v9_0_set_cp_ecc_error_state,
	.process = amdgpu_gfx_cp_ecc_error_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.cp_ecc_error_irq.num_types = 2;
	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
	case CHIP_ALDEBARAN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}

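/* Initialize per-ASIC GDS size, GDS compute wave limits, and GWS/OA sizes */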
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->gds.gds_size = 0x10000;
		break;
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
		adev->gds.gds_size = 0x1000;
		break;
	case CHIP_ALDEBARAN:
		/* aldebaran removed all the GDS internal memory,
		 * only support GWS opcode in kernel, like barrier
		 * semaphore, etc.
		 */
		adev->gds.gds_size = 0;
		break;
	default:
		adev->gds.gds_size = 0x10000;
		break;
	}

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	case CHIP_VEGA12:
		adev->gds.gds_compute_max_wave_id = 0x27f;
		break;
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
		else
			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
		break;
	case CHIP_ARCTURUS:
		adev->gds.gds_compute_max_wave_id = 0xfff;
		break;
	case CHIP_ALDEBARAN:
		/* deprecated for Aldebaran, no usage at all */
		adev->gds.gds_compute_max_wave_id = 0;
		break;
	default:
		/* this really depends on the chip */
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	}

	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

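/* Mark user-requested CUs (amdgpu.disable_cu) inactive for the selected SE/SH */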
static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

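/*
 * Build the active-CU bitmap and always-on CU mask for all shader
 * engines/arrays and fill in cu_info.
 */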
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 4];

	if (!adev || !cu_info)
		return -EINVAL;

	/*
	 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
	 */
	if (adev->gfx.config.max_shader_engines *
		adev->gfx.config.max_sh_per_se > 16)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			gfx_v9_0_set_user_cu_inactive_bitmap(
				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);

			/*
			 * The bitmap (and ao_cu_bitmap) in the cu_info
			 * structure is a 4x4 array, which suits Vega ASICs
			 * with their 4*2 SE/SH layout.
			 * But for Arcturus, the SE/SH layout changed to 8*1.
			 * To minimize the impact, we keep it compatible with
			 * the current bitmap array as below:
			 *    SE4,SH0 --> bitmap[0][1]
			 *    SE5,SH0 --> bitmap[1][1]
			 *    SE6,SH0 --> bitmap[2][1]
			 *    SE7,SH0 --> bitmap[3][1]
			 */
			cu_info->bitmap[i % 4][j + i / 4] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};