/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "nv.h"
#include "nvd.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"

#include "soc15.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
#include "nbio_v2_3.h"

#define GFX10_NUM_GFX_RINGS		2
#define GFX10_MEC_HPD_SIZE		2048

#define F32_CE_PROGRAM_RAM_SIZE		65536
#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");

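/*
 * "Golden" settings are per-board register overrides applied once during
 * init.  Each soc15_reg_golden entry is an (and_mask, or_mask) pair;
 * soc15_program_register_sequence() in effect performs
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~and_mask;
 *	tmp |= or_mask;
 *	WREG32(reg, tmp);
 *
 * (entries whose mask is all ones are simply written outright).
 */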
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xfeff8fff, 0xfeff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x000007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0x20000000, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07900000, 0x04900000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
};

static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
{
	/* Pending on emulation bring up */
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

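/*
 * A rough reading of DEFAULT_SH_MEM_CONFIG above: 64-bit shader memory
 * addressing, no faulting on unaligned accesses, retry-on-fault allowed for
 * all memory operations, and the largest initial instruction prefetch
 * setting (3).  It is applied to every VMID in gfx_v10_0_constants_init()
 * and gfx_v10_0_init_compute_vmid() below.
 */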

static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance);
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev);
static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);

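/*
 * The KIQ (Kernel Interface Queue) is a privileged compute queue through
 * which the driver asks the CP firmware to map, unmap and query the other
 * queues.  The helpers below only assemble the PM4 packets; the dword
 * counts they emit must match the *_size fields in gfx_v10_0_kiq_pm4_funcs
 * further down.  A PACKET3(op, n) header is followed by n + 1 payload
 * dwords, so e.g. MAP_QUEUES with n = 5 is a 7-dword packet.
 */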
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx10_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx10_kiq_set_resources,
	.kiq_map_queues = gfx10_kiq_map_queues,
	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
	.kiq_query_status = gfx10_kiq_query_status,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
};

static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}

static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_0_nv10,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
		break;
	default:
		break;
	}
}

static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) |	/* mmio */
			  (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

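/*
 * WAIT_REG_MEM stalls the selected CP engine until a register or memory
 * dword satisfies a compare.  With WAIT_REG_MEM_FUNCTION(3) ("equal") the
 * packet built below in effect spins until
 *
 *	(*addr & mask) == ref
 *
 * where the address is a register offset (mem_space == 0) or a
 * dword-aligned GPU address split across addr0/addr1 (mem_space == 1),
 * polled at the 'inv' interval.
 */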
static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

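/*
 * Minimal ring liveness check: seed a scratch register with 0xCAFEDEAD
 * from the CPU, ask the CP to overwrite it with 0xDEADBEEF via a
 * SET_UCONFIG_REG packet, then poll until the new value shows up or the
 * timeout expires.  If the write never lands, command processing on this
 * ring is broken.
 */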
static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		if (amdgpu_emu_mode == 1)
			DRM_INFO("ring test on %d succeeded in %d msecs\n",
				 ring->idx, i);
		else
			DRM_INFO("ring test on %d succeeded in %d usecs\n",
				 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}

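/*
 * The IB test repeats the same scratch-register handshake, but places the
 * SET_UCONFIG_REG packet in an indirect buffer and waits on the fence
 * returned by the scheduler, exercising the whole submission path rather
 * than just direct ring writes.
 */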
static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}

	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}

static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

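/*
 * Version 2.1 RLC firmware appends three save/restore lists to the base
 * ucode: a control list (CNTL) plus the GPM and SRM register lists.  The
 * v2.1 header stores each list's version, size and byte offset; the helper
 * below only records pointers into the already-loaded firmware blob, so
 * nothing is copied here.
 */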
static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		/* gfxoff is disabled for navi10 */
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		break;
	default:
		break;
	}
}

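/*
 * Microcode loading follows the usual amdgpu pattern: build an
 * "amdgpu/<chip>_<block>.bin" name, request_firmware() it, validate the
 * header, then record version/feature numbers.  When the PSP performs the
 * actual load (AMDGPU_FW_LOAD_PSP), each image is also registered in
 * adev->firmware.ucode[] so the PSP code can find it later; mec2 is
 * optional and its absence is not treated as an error.
 */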
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
		le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
		le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
		le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
		le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
		le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
		le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
		le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
			adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v10_0_init_rlc_ext_microcode(adev);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		/* mec2 firmware is optional */
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) -
			      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
				      le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx10: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}

	gfx_v10_0_check_gfxoff_flag(adev);

	return err;
}

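/*
 * The clear-state buffer (CSB) is a canned PM4 stream the RLC replays to
 * bring the context registers into a known state.  The size computed below
 * mirrors the layout built in gfx_v10_0_get_csb_buffer(): 2 dwords of
 * PREAMBLE_CNTL, 3 of CONTEXT_CONTROL, (2 + reg_count) per SET_CONTEXT_REG
 * extent, 3 for the PA_SC_TILE_STEERING_OVERRIDE write, then 2 + 2 for the
 * end-of-preamble and CLEAR_STATE packets.
 */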
static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
{
	/* free the RLC clear state BO */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* free the CP table BO */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx10_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	return 0;
}

static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
			  AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		adev->gfx.rlc.clear_state_gpu_addr =
			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return r;
}

static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.rlc.clear_state_obj)
		return;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}
}

static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v10_0_me_init(struct amdgpu_device *adev)
{
	int r;

	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);

	r = gfx_v10_0_init_microcode(adev);
	if (r)
		DRM_ERROR("Failed to load gfx firmware!\n");

	return r;
}

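/*
 * The MEC (the compute microengine) needs per-queue EOP buffer space,
 * GFX10_MEC_HPD_SIZE bytes per compute ring, allocated below as a single
 * GTT BO.  With direct (non-PSP) loading the MEC ucode itself is also
 * staged in a GTT BO so it can be fetched from there.
 */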
static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data = NULL;
	unsigned fw_size;
	u32 *fw = NULL;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		gfx_v10_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
					    le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

		r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.mec_fw_obj,
					      &adev->gfx.mec.mec_fw_gpu_addr,
					      (void **)&fw);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
			gfx_v10_0_mec_fini(adev);
			return r;
		}

		memcpy(fw, fw_data, fw_size);

		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	}

	return 0;
}

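/*
 * Wave state is reached through the SQ_IND_INDEX/SQ_IND_DATA indirect
 * register pair: the index selects a wave and a per-wave register, and the
 * data port returns that register's value.  With AUTO_INCR set,
 * consecutive SQ_IND_DATA reads walk successive registers, which is how
 * whole SGPR/VGPR ranges are dumped below.
 */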
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
		     (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		     (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/*
	 * On gfx10 the SIMD is selected as part of the INSTANCE field of
	 * GRBM_GFX_INDEX (via select_se_sh), so simd must be zero here.
	 */
	WARN_ON(simd != 0);

	/* type 2 wave data */
	dst[(*no_fields)++] = 2;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
}

static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm)
{
	nv_grbm_select(adev, me, pipe, q, vm);
}

static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v10_0_select_se_sh,
	.read_wave_data = &gfx_v10_0_read_wave_data,
	.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
};

static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v10_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
}

static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;
	return 0;
}

static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
			     + (ring_id * GFX10_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		   + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		   + ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}

static int gfx_v10_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 2;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
			      &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v10_0_scratch_init(adev);

	r = gfx_v10_0_me_init(adev);
	if (r)
		return r;

	r = gfx_v10_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v10_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v10_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
								     j))
					continue;

				r = gfx_v10_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v10_0_rlc_backdoor_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	adev->gfx.ce_ram_size = F32_CE_PROGRAM_RAM_SIZE;

	gfx_v10_0_gpu_early_init(adev);

	return 0;
}

static void gfx_v10_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
}

static void gfx_v10_0_ce_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.ce.ce_fw_obj,
			      &adev->gfx.ce.ce_fw_gpu_addr,
			      (void **)&adev->gfx.ce.ce_fw_ptr);
}

static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);
}

static int gfx_v10_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v10_0_pfp_fini(adev);
	gfx_v10_0_ce_fini(adev);
	gfx_v10_0_me_fini(adev);
	gfx_v10_0_rlc_fini(adev);
	gfx_v10_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);

	gfx_v10_0_free_microcode(adev);

	return 0;
}

static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}

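/*
 * Two registers describe disabled render backends: CC_RB_BACKEND_DISABLE
 * (fused off in hardware) and GC_USER_RB_BACKEND_DISABLE (disabled by
 * software).  ORing them, masking out the backend-disable field and
 * inverting yields the bitmap of RBs actually usable in the currently
 * selected SE/SH.
 */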
static u32 gfx_v10_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
				     adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v10_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

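/*
 * PA_SC_TILE_STEERING_OVERRIDE packs log2 values: the number of scan
 * converters, RBs per scan converter and packers per scan converter,
 * derived from the active-RB bitmap read back above.  The value computed
 * here is also what gets patched into the clear-state buffer by
 * gfx_v10_0_get_csb_buffer().
 */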
static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *adev)
{
	uint32_t num_sc;
	uint32_t enabled_rb_per_sh;
	uint32_t active_rb_bitmap;
	uint32_t num_rb_per_sc;
	uint32_t num_packer_per_sc;
	uint32_t pa_sc_tile_steering_override;

	/* init num_sc */
	num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se *
		 adev->gfx.config.num_sc_per_sh;
	/* init num_rb_per_sc */
	active_rb_bitmap = gfx_v10_0_get_rb_active_bitmap(adev);
	enabled_rb_per_sh = hweight32(active_rb_bitmap);
	num_rb_per_sc = enabled_rb_per_sh / adev->gfx.config.num_sc_per_sh;
	/* init num_packer_per_sc */
	num_packer_per_sc = adev->gfx.config.num_packer_per_sc;

	pa_sc_tile_steering_override = 0;
	pa_sc_tile_steering_override |=
		(order_base_2(num_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_rb_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_packer_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK;

	return pa_sc_tile_steering_override;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)

static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
{
	int i, j, k;
	int max_wgp_per_sh = adev->gfx.config.max_cu_per_sh >> 1;
	u32 tmp, wgp_active_bitmap = 0;
	u32 gcrd_targets_disable_tcp = 0;
	u32 utcl_invreq_disable = 0;
	/*
	 * GCRD_SA_TARGETS_DISABLE, from the LSB: two TCP bits per WGP,
	 * one SQC bit per WGP, then four GL1C bits.
	 */
	u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		max_wgp_per_sh + /* SQC */
		4); /* GL1C */
	/*
	 * UTCL1_UTCL0_INVREQ_DISABLE, from the LSB: two TCP bits and two
	 * SQC bits per WGP, then four LDS bits and one SQG bit.
	 */
	u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		2 * max_wgp_per_sh + /* SQC */
		4 + /* LDS */
		1); /* SQG */

	if (adev->asic_type == CHIP_NAVI10) {
		mutex_lock(&adev->grbm_idx_mutex);
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
				wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
				/*
				 * Set corresponding TCP bits for the inactive
				 * WGPs in GCRD_SA_TARGETS_DISABLE
				 */
				gcrd_targets_disable_tcp = 0;
				/* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
				utcl_invreq_disable = 0;

				for (k = 0; k < max_wgp_per_sh; k++) {
					if (!(wgp_active_bitmap & (1 << k))) {
						gcrd_targets_disable_tcp |= 3 << (2 * k);
						utcl_invreq_disable |= (3 << (2 * k)) |
							(3 << (2 * (max_wgp_per_sh + k)));
					}
				}

				tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
				/* only override TCP & SQC bits */
				tmp &= 0xffffffff << (4 * max_wgp_per_sh);
				tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
				WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);

				tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
				/* only override TCP bits */
				tmp &= 0xffffffff << (2 * max_wgp_per_sh);
				tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
				WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
			}
		}

		gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}
}

static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v10_0_tiling_mode_table_init(adev);

	gfx_v10_0_setup_rb(adev);
	gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.pa_sc_tile_steering_override =
		gfx_v10_0_init_pa_sc_tile_steering_override(adev);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
		}
	}
	nv_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v10_0_init_compute_vmid(adev);
}

static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}

static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
{
	/* csib */
	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
}

static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
{
	gfx_v10_0_init_csb(adev);

	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);

	/* TODO: init power gating */
}

static void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
}

static void gfx_v10_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t rlc_pg_cntl;

	rlc_pg_cntl = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL);

	if (!enable) {
		/*
		 * With this RLC_PG_CNTL bit set, the RLC stops messaging the
		 * SMU, so no handshake happens and GFXOFF stays disabled;
		 * with it clear (the default) the RLC waits for handshake
		 * acks from the SMU and GFXOFF can be engaged.
		 */
		rlc_pg_cntl |= 0x80000;
	} else
		rlc_pg_cntl &= ~0x80000;
	WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl);
}

static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
{
	/* TODO: enable rlc & smu handshake until smu
	 * is wakeup normally
	 */
	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
		gfx_v10_0_rlc_smu_handshake_cntl(adev, false);

	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
	udelay(50);
}

static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* enable Save Restore Machine */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
}

static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);

	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA,
			     le32_to_cpup(fw_data++));

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
		if (r)
			return r;
		gfx_v10_0_init_pg(adev);

		/* enable RLC SRM */
		gfx_v10_0_rlc_enable_srm(adev);

	} else {
		adev->gfx.rlc.funcs->stop(adev);

		/* disable CG */
		WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);

		/* disable PG */
		WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			/* legacy rlc firmware loading */
			r = gfx_v10_0_rlc_load_microcode(adev);
			if (r)
				return r;
		} else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
			/* rlc backdoor autoload firmware */
			r = gfx_v10_0_rlc_backdoor_autoload_enable(adev);
			if (r)
				return r;
		}

		gfx_v10_0_init_pg(adev);
		adev->gfx.rlc.funcs->start(adev);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
			r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
			if (r)
				return r;
		}
	}
	return 0;
}

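/*
 * RLC "backdoor" autoload: instead of the driver or PSP writing each ucode
 * image to its engine, all images are laid out in one GTT buffer at offsets
 * dictated by a table of contents (TOC) taken from the PSP firmware blob,
 * and the RLC then loads the images into place itself.  The table below
 * caches the parsed TOC as (id, offset, size) triples.
 */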
static struct {
	FIRMWARE_ID	id;
	unsigned int	offset;
	unsigned int	size;
} rlc_autoload_info[FIRMWARE_ID_MAX];

static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
{
	int ret;
	RLC_TABLE_OF_CONTENT *rlc_toc;

	ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_GTT,
					&adev->gfx.rlc.rlc_toc_bo,
					&adev->gfx.rlc.rlc_toc_gpu_addr,
					(void **)&adev->gfx.rlc.rlc_toc_buf);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to create rlc toc bo\n", ret);
		return ret;
	}

	/* Copy toc from psp sos fw to rlc toc buffer */
	memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);

	rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
	while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
	       (rlc_toc->id < FIRMWARE_ID_MAX)) {
		if ((rlc_toc->id >= FIRMWARE_ID_CP_CE) &&
		    (rlc_toc->id <= FIRMWARE_ID_CP_MES)) {
			/* CP firmware offsets need page alignment */
			rlc_toc->offset = ALIGN(rlc_toc->offset * 4, PAGE_SIZE);
		}

		rlc_autoload_info[rlc_toc->id].id = rlc_toc->id;
		rlc_autoload_info[rlc_toc->id].offset = rlc_toc->offset * 4;
		rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;

		rlc_toc++;
	}

	return 0;
}
1774
1775static uint32_t gfx_v10_0_calc_toc_total_size(struct amdgpu_device *adev)
1776{
1777 uint32_t total_size = 0;
1778 FIRMWARE_ID id;
1779 int ret;
1780
1781 ret = gfx_v10_0_parse_rlc_toc(adev);
1782 if (ret) {
1783 dev_err(adev->dev, "failed to parse rlc toc\n");
1784 return 0;
1785 }
1786
1787 for (id = FIRMWARE_ID_RLC_G_UCODE; id < FIRMWARE_ID_MAX; id++)
1788 total_size += rlc_autoload_info[id].size;
1789
1790
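	/* alignment may leave holes, so make sure the buffer still covers the last entry */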
	if (total_size < rlc_autoload_info[FIRMWARE_ID_MAX - 1].offset)
		total_size = rlc_autoload_info[FIRMWARE_ID_MAX - 1].offset +
			     rlc_autoload_info[FIRMWARE_ID_MAX - 1].size;

	return total_size;
}

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v10_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_toc_bo,
			      &adev->gfx.rlc.rlc_toc_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_toc_buf);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

static void gfx_v10_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= FIRMWARE_ID_INVALID || id >= FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
}

static void gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
{
	void *data;
	uint32_t size;

	data = adev->gfx.rlc.rlc_toc_buf;
	size = rlc_autoload_info[FIRMWARE_ID_RLC_TOC].size;

	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_RLC_TOC,
						   data, size);
}

static void gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;

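	/* copy the PFP, CE, ME, RLC-G and MEC ucode into the autoload buffer */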
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_PFP,
						   fw_data, fw_size);

	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;
	fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_CE,
						   fw_data, fw_size);

	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_ME,
						   fw_data, fw_size);

	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
		le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size);

	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.mec_fw->data;
	fw_data = (const __le32 *)(adev->gfx.mec_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	/* exclude the MEC jump table from the copied size */
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
		  cp_hdr->jt_size * 4;
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_MEC,
						   fw_data, fw_size);
}

static void gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v1_0 *sdma_hdr;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		sdma_hdr = (const struct sdma_firmware_header_v1_0 *)
			adev->sdma.instance[i].fw->data;
		fw_data = (const __le32 *)(adev->sdma.instance[i].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(sdma_hdr->header.ucode_size_bytes);

		if (i == 0) {
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA0_UCODE, fw_data, fw_size);
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA0_JT,
				(uint32_t *)fw_data + sdma_hdr->jt_offset,
				sdma_hdr->jt_size * 4);
		} else if (i == 1) {
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA1_UCODE, fw_data, fw_size);
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA1_JT,
				(uint32_t *)fw_data + sdma_hdr->jt_offset,
				sdma_hdr->jt_size * 4);
		}
	}
}

static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size, tmp;
	uint64_t gpu_addr;

	gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(adev);
	gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
	gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);

	rlc_g_offset = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_LO, lower_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_SIZE, rlc_g_size);

	tmp = RREG32_SOC15(GC, 0, mmRLC_HYP_RESET_VECTOR);
	if (!(tmp & (RLC_HYP_RESET_VECTOR__COLD_BOOT_EXIT_MASK |
		     RLC_HYP_RESET_VECTOR__VDDGFX_EXIT_MASK))) {
		DRM_ERROR("Neither COLD_BOOT_EXIT nor VDDGFX_EXIT is set\n");
		return -EINVAL;
	}

	tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (tmp & RLC_CNTL__RLC_ENABLE_F32_MASK) {
		DRM_ERROR("RLC ROM should halt itself\n");
		return -EINVAL;
	}

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_me_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000;
	uint32_t tmp;
	int i;
	uint64_t addr;

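	/* Trigger an invalidation of the ME instruction cache */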
	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);

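	/* Wait for the invalidation to complete */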
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

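	/* Point the ME instruction cache at the ME ucode inside the autoload buffer */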
	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_ME].offset;
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000;
	uint32_t tmp;
	int i;
	uint64_t addr;

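	/* invalidate and reprogram the CE instruction cache, mirroring the ME flow above */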
	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_CE].offset;
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000;
	uint32_t tmp;
	int i;
	uint64_t addr;

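	/* invalidate and reprogram the PFP instruction cache, mirroring the ME flow above */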
	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_PFP].offset;
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000;
	uint32_t tmp;
	int i;
	uint64_t addr;

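	/* invalidate and reprogram the MEC (CPC) instruction cache, mirroring the ME flow above */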
	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_MEC].offset;
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
{
	uint32_t cp_status;
	uint32_t bootload_status;
	int i, r;

	for (i = 0; i < adev->usec_timeout; i++) {
		cp_status = RREG32_SOC15(GC, 0, mmCP_STAT);
		bootload_status = RREG32_SOC15(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS);
		if ((cp_status == 0) &&
		    (REG_GET_FIELD(bootload_status,
			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
			break;
		}
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
		return -ETIMEDOUT;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v10_0_rlc_backdoor_autoload_config_me_cache(adev);
		if (r)
			return r;

		r = gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(adev);
		if (r)
			return r;

		r = gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(adev);
		if (r)
			return r;

		r = gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(adev);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
	if (!enable) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].sched.ready = false;
	}
	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
	udelay(50);
}

static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const __le32 *fw_data;
	unsigned int i, fw_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.pfp.pfp_fw_obj,
				      &adev->gfx.pfp.pfp_fw_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
		gfx_v10_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);

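	/* invalidate the PFP instruction cache before pointing it at the new ucode */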
	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio_funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL, tmp);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
		     adev->gfx.pfp.pfp_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
		     upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));

	return 0;
}

static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const __le32 *fw_data;
	unsigned int i, fw_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000;

	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;

	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
		le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, ce_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.ce.ce_fw_obj,
				      &adev->gfx.ce.ce_fw_gpu_addr,
				      (void **)&adev->gfx.ce.ce_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create ce fw bo\n", r);
		gfx_v10_0_ce_fini(adev);
		return r;
	}

	memcpy(adev->gfx.ce.ce_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.ce.ce_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.ce.ce_fw_obj);

	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio_funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL, tmp);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
		     adev->gfx.ce.ce_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
		     upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr));

	return 0;
}

static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned int i, fw_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000;

	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
		gfx_v10_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);

	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio_funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL, tmp);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
		     adev->gfx.me.me_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
		     upper_32_bits(adev->gfx.me.me_fw_gpu_addr));

	return 0;
}

static int gfx_v10_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	gfx_v10_0_cp_gfx_enable(adev, false);

	r = gfx_v10_0_cp_gfx_load_pfp_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
		return r;
	}

	r = gfx_v10_0_cp_gfx_load_ce_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load ce fw\n", r);
		return r;
	}

	r = gfx_v10_0_cp_gfx_load_me_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
		return r;
	}

	return 0;
}

static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;
	int ctx_reg_offset;

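	/* init the CP and emit the clear-state (CSB) preamble on gfx ring 0 */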
	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT,
		     adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);

	gfx_v10_0_cp_gfx_enable(adev, true);

	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG,
							  ext->reg_count));
				amdgpu_ring_write(ring, ext->reg_index -
						  PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) -
		PACKET3_SET_CONTEXT_REG_START;
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	amdgpu_ring_write(ring, ctx_reg_offset);
	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_commit(ring);

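	/* emit a CLEAR_STATE on gfx ring 1 as well */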
	ring = &adev->gfx.gfx_ring[1];
	r = amdgpu_ring_alloc(ring, 2);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_commit(ring);

	return 0;
}

static void gfx_v10_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
					 CP_PIPE_ID pipe)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL);
	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, tmp);
}

static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
					  struct amdgpu_ring *ring)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}

static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;
	u32 i;

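	/* Set the write pointer delay */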
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);

	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);

	mutex_lock(&adev->srbm_mutex);
	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);

	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);

	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);

	mutex_lock(&adev->srbm_mutex);
	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
	mutex_unlock(&adev->srbm_mutex);
	ring = &adev->gfx.gfx_ring[1];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);

	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));

	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
	WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);

	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);

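	/* switch back to pipe 0 before starting the CP */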
	mutex_lock(&adev->srbm_mutex);
	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);

	gfx_v10_0_cp_gfx_start(adev);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}

	return 0;
}

static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
			     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
			      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].sched.ready = false;
		adev->gfx.kiq.ring.sched.ready = false;
	}
	udelay(50);
}

static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned int i;
	u32 tmp;
	u32 usec_timeout = 50000;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v10_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));

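	/* invalidate the MEC instruction cache */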
	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio_funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr &
		     0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

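	/* load the MEC jump table through the ME1 ucode port */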
	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 0);

	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);

	return 0;
}

static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

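	/* tell the RLC which me/pipe/queue is the KIQ, then activate it */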
	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}

static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr;
	uint32_t tmp;
	uint32_t rb_bufsz;

	/* set up gfx hqd wptr */
	mqd->cp_gfx_hqd_wptr = 0;
	mqd->cp_gfx_hqd_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set up mqd control */
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
	mqd->cp_gfx_mqd_control = tmp;

	/* set up gfx hqd vmid to 0, the ring buffer's vmid */
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
	mqd->cp_gfx_hqd_vmid = 0;

	/* set up default queue priority level */
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
	mqd->cp_gfx_hqd_queue_priority = tmp;

	/* set up time quantum */
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
	mqd->cp_gfx_hqd_quantum = tmp;

	/* set up gfx hqd base, similar to CP_RB_BASE */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
	mqd->cp_gfx_hqd_rptr_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up rb_wptr_poll addr */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
	rb_bufsz = order_base_2(ring->ring_size / 4) - 1;
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
	mqd->cp_gfx_hqd_cntl = tmp;

	/* set up cp_doorbell_control */
	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	mqd->cp_rb_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR);

	/* activate the queue */
	mqd->cp_gfx_hqd_active = 1;

	return 0;
}

#ifdef BRING_UP_DEBUG
static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_gfx_mqd *mqd = ring->mqd_ptr;

	/* set mmCP_GFX_HQD_WPTR/_HI to 0 */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);

	/* set GFX_MQD_BASE */
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);

	/* set GFX_MQD_CONTROL */
	WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);

	/* set GFX_HQD_VMID to 0 */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);

	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
		     mqd->cp_gfx_hqd_queue_priority);
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);

	/* set GFX_HQD_BASE, similar to CP_RB_BASE */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);

	/* set GFX_HQD_RPTR_ADDR, similar to CP_RB_RPTR */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);

	/* set GFX_HQD_CNTL, similar to CP_RB_CNTL */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);

	/* set RB_WPTR_POLL_ADDR */
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);

	/* set RB_DOORBELL_CONTROL */
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);

	/* activate the queue */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);

	return 0;
}
#endif

static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_gfx_mqd *mqd = ring->mqd_ptr;

	if (!adev->in_gpu_reset && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_gfx_mqd_init(ring);
#ifdef BRING_UP_DEBUG
		gfx_v10_0_gfx_queue_init_register(ring);
#endif
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
		if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
			memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS],
			       mqd, sizeof(*mqd));
	} else if (adev->in_gpu_reset) {
		/* reset mqd with the backup copy */
		if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
			memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS],
			       sizeof(*mqd));
		/* reset the ring */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
#ifdef BRING_UP_DEBUG
		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_gfx_queue_init_register(ring);
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
#endif
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

#ifndef BRING_UP_DEBUG
static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
			      adev->gfx.num_gfx_rings);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);

	r = amdgpu_ring_test_ring(kiq_ring);
	if (r) {
		DRM_ERROR("kgq enable failed\n");
		kiq_ring->sched.ready = false;
	}
	return r;
}
#endif

static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;

		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v10_0_gfx_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}
#ifndef BRING_UP_DEBUG
	r = gfx_v10_0_kiq_enable_kgq(adev);
	if (r)
		goto done;
#endif
	r = gfx_v10_0_cp_gfx_start(adev);
	if (r)
		goto done;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}
done:
	return r;
}

static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_compute_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;

	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(GFX10_MEC_HPD_SIZE / 4) - 1));
	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.EN is set */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* activate the queue */
	mqd->cp_hqd_active = 1;

	return 0;
}

static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_compute_mqd *mqd = ring->mqd_ptr;
	int j;

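	/* disable wptr polling */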
	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* write the EOP addr */
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
		     mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
		     mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size */
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
		     mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
			     mqd->cp_hqd_dequeue_request);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
			     mqd->cp_hqd_pq_rptr);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
			     mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
			     mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
		     mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
		     mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
		     mqd->cp_mqd_control);

	/* set the pointer to the HQD, similar to CP_RB0_BASE/_HI */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
		     mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
		     mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, similar to CP_RB0_CNTL */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
		     mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* set up the wptr poll address */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* set the doorbell range for the KIQ */
	if (ring->use_doorbell) {
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
			     (adev->doorbell_index.kiq * 2) << 2);
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
			     (adev->doorbell_index.userqueue_end * 2) << 2);
	}

	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
		     mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
		     mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
		     mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
		     mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_compute_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v10_0_kiq_setting(ring);

	if (adev->in_gpu_reset) {
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);

		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_kiq_init_register(ring);
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_compute_mqd_init(ring);
		gfx_v10_0_kiq_init_register(ring);
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	}

	return 0;
}

static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_compute_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!adev->in_gpu_reset && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_compute_mqd_init(ring);
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else if (adev->in_gpu_reset) {
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq.ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0))
		return r;

	gfx_v10_0_kiq_init_queue(ring);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	ring->sched.ready = true;
	return 0;
}

static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	gfx_v10_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v10_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kcq(adev);
done:
	return r;
}

static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v10_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/* legacy firmware loading */
		r = gfx_v10_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		r = gfx_v10_0_cp_compute_load_microcode(adev);
		if (r)
			return r;
	}

	r = gfx_v10_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v10_0_kcq_resume(adev);
	if (r)
		return r;

	if (!amdgpu_async_gfx_ring) {
		r = gfx_v10_0_cp_gfx_resume(adev);
		if (r)
			return r;
	} else {
		r = gfx_v10_0_cp_async_gfx_ring_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
			 i, ring->me, ring->pipe, ring->queue);
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->sched.ready = false;
			return r;
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		ring->sched.ready = true;
		DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
			 i, ring->me, ring->pipe, ring->queue);
		r = amdgpu_ring_test_ring(ring);
		if (r)
			ring->sched.ready = false;
	}

	return 0;
}

static void gfx_v10_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v10_0_cp_gfx_enable(adev, enable);
	gfx_v10_0_cp_compute_enable(adev, enable);
}

static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
{
	uint32_t data, pattern = 0xDEADBEEF;

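	/* write a test pattern through the UMD alias and see if it lands in the privileged register */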
	data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);

	WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);

	WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);

	if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
		return true;
	}

	WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
	return false;
}

static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
{
	uint32_t data;

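	/* set the starting CAM index, then program each UMD -> privileged register remap pair */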
	WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);

	/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
	data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
}

static int gfx_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gfx_v10_0_csb_vram_pin(adev);
	if (r)
		return r;

	if (!amdgpu_emu_mode)
		gfx_v10_0_init_golden_registers(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/*
		 * On gfx 10, rlc firmware loading relies on the smu firmware
		 * being loaded first, so in direct mode the smc ucode has to
		 * be loaded here before the rlc.
		 */
		r = smu_load_microcode(&adev->smu);
		if (r)
			return r;

		r = smu_check_fw_status(&adev->smu);
		if (r) {
			pr_err("SMC firmware status is not correct\n");
			return r;
		}
	}

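	/* set up GRBM CAM remapping if it has not been configured already */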
3544 if (!gfx_v10_0_check_grbm_cam_remapping(adev))
3545 gfx_v10_0_setup_grbm_cam_remapping(adev);
3546
3547 gfx_v10_0_constants_init(adev);
3548
3549 r = gfx_v10_0_rlc_resume(adev);
3550 if (r)
3551 return r;
3552
3553
3554
3555
3556
3557 gfx_v10_0_tcp_harvest(adev);
3558
3559 r = gfx_v10_0_cp_resume(adev);
3560 if (r)
3561 return r;
3562
3563 return r;
3564}
3565
3566#ifndef BRING_UP_DEBUG
3567static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
3568{
3569 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3570 struct amdgpu_ring *kiq_ring = &kiq->ring;
3571 int i;
3572
3573 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3574 return -EINVAL;
3575
3576 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
3577 adev->gfx.num_gfx_rings))
3578 return -ENOMEM;
3579
3580 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3581 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
3582 PREEMPT_QUEUES, 0, 0);
3583
3584 return amdgpu_ring_test_ring(kiq_ring);
3585}
3586#endif
3587
3588static int gfx_v10_0_hw_fini(void *handle)
3589{
3590 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3591 int r;
3592
3593 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3594 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3595#ifndef BRING_UP_DEBUG
3596 if (amdgpu_async_gfx_ring) {
3597 r = gfx_v10_0_kiq_disable_kgq(adev);
3598 if (r)
3599 DRM_ERROR("KGQ disable failed\n");
3600 }
3601#endif
3602 if (amdgpu_gfx_disable_kcq(adev))
3603 DRM_ERROR("KCQ disable failed\n");
3604 if (amdgpu_sriov_vf(adev)) {
3605 pr_debug("For SRIOV client, shouldn't do anything.\n");
3606 return 0;
3607 }
3608 gfx_v10_0_cp_enable(adev, false);
3609 gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3610 gfx_v10_0_csb_vram_unpin(adev);
3611
3612 return 0;
3613}
3614
3615static int gfx_v10_0_suspend(void *handle)
3616{
3617 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3618
3619 adev->in_suspend = true;
3620 return gfx_v10_0_hw_fini(adev);
3621}
3622
3623static int gfx_v10_0_resume(void *handle)
3624{
3625 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3626 int r;
3627
3628 r = gfx_v10_0_hw_init(adev);
3629 adev->in_suspend = false;
3630 return r;
3631}

static bool gfx_v10_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
			      GRBM_STATUS, GUI_ACTIVE);
}

static int gfx_v10_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS, bail out once GUI is idle */
		tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
			GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gfx_v10_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

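	/* GRBM_STATUS */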
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
		   GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
		   GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP,
						1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX,
						1);
	}

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP,
						1);
	}

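	/* GRBM_STATUS2 */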
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC,
						1);

	if (grbm_soft_reset) {
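		/* stop the rlc */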
		gfx_v10_0_rlc_stop(adev);

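		/* Disable GFX parsing/prefetching */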
		gfx_v10_0_cp_gfx_enable(adev, false);

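		/* Disable MEC parsing/prefetching */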
		gfx_v10_0_cp_compute_enable(adev, false);

		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}

static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}

static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					   uint32_t vmid,
					   uint32_t gds_base, uint32_t gds_size,
					   uint32_t gws_base, uint32_t gws_size,
					   uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

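	/* GDS Base */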
	gfx_v10_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
				    gds_base);

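	/* GDS Size */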
	gfx_v10_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
				    gds_size);

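	/* GWS */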
	gfx_v10_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

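	/* OA */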
	gfx_v10_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
				    (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v10_0_set_kiq_pm4_funcs(adev);
	gfx_v10_0_set_ring_funcs(adev);
	gfx_v10_0_set_irq_funcs(adev);
	gfx_v10_0_set_gds_init(adev);
	gfx_v10_0_set_rlc_funcs(adev);

	return 0;
}

static int gfx_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	return 0;
}

static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_cntl;

	/* if RLC is not enabled, do nothing */
	rlc_cntl = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	return REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32);
}

static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);

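	/* wait for RLC_SAFE_MODE */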
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
}

static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t data, def;

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		/* keep the RLC clock gating override asserted */
		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* MGLS is a global flag to control all MGLS in GFX */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			/* 2 - RLC memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
			}
			/* 3 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
			}
		}
	} else {
		/* 1 - MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
		}
	}
}

static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t data, def;

	/* Enable 3D CGCG/CGLS */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		/* unset CGCG override for GFX3D */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable 3D cgcg FSM(0x0000363f) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
		data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		/* Disable CGCG/CGLS */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
		/* disable cgcg, cgls should be disabled too */
		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
	}
}

static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		else
			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable cgcg FSM(0x0000363F) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
		data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
		/* reset CGCG/CGLS bits */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
	}
}

static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					     bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev);

	if (enable) {
		/* CGCG/CGLS should be enabled after MGCG/MGLS
		 * ===  MGCG + MGLS ===
		 */
		gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
		/* ===  CGCG/CGLS for GFX 3D only === */
		gfx_v10_0_update_3d_clock_gating(adev, enable);
		/* ===  CGCG + CGLS === */
		gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS
		 * ===  CGCG + CGLS ===
		 */
		gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
		/* ===  CGCG/CGLS for GFX 3D only === */
		gfx_v10_0_update_3d_clock_gating(adev, enable);
		/* ===  MGCG + MGLS === */
		gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
	}

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_CGCG |
	     AMD_CG_SUPPORT_GFX_CGLS |
	     AMD_CG_SUPPORT_GFX_3D_CGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGLS))
		gfx_v10_0_enable_gui_idle_interrupt(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev);

	return 0;
}

static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
	.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
	.set_safe_mode = gfx_v10_0_set_safe_mode,
	.unset_safe_mode = gfx_v10_0_unset_safe_mode,
	.init = gfx_v10_0_rlc_init,
	.get_csb_size = gfx_v10_0_get_csb_size,
	.get_csb_buffer = gfx_v10_0_get_csb_buffer,
	.resume = gfx_v10_0_rlc_resume,
	.stop = gfx_v10_0_rlc_stop,
	.reset = gfx_v10_0_rlc_reset,
	.start = gfx_v10_0_rlc_start,
};

static int gfx_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		if (!enable) {
			amdgpu_gfx_off_ctrl(adev, false);
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
		} else {
			amdgpu_gfx_off_ctrl(adev, true);
		}
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		gfx_v10_0_update_gfx_clock_gating(adev,
						  state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

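	/* AMD_CG_SUPPORT_GFX_MGCG */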
	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

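	/* AMD_CG_SUPPORT_GFX_CGCG */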
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

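	/* AMD_CG_SUPPORT_GFX_CGLS */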
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

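	/* AMD_CG_SUPPORT_GFX_RLC_LS */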
	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

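	/* AMD_CG_SUPPORT_GFX_CP_LS */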
	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;

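	/* AMD_CG_SUPPORT_GFX_3D_CGCG */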
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

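	/* AMD_CG_SUPPORT_GFX_3D_CGLS */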
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}

static u64 gfx_v10_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];
}

static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

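	/* XXX check if swapping is necessary on BE */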
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
	} else {
		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}

static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
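		/* XXX check if swapping is necessary on BE */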
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
	}
}

static u64 gfx_v10_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];
}

static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

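	/* XXX check if swapping is necessary on BE */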
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

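	/* XXX check if swapping is necessary on BE */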
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx10 now */
	}
}

static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			       adev->nbio_funcs->get_hdp_flush_req_offset(adev),
			       adev->nbio_funcs->get_hdp_flush_done_offset(adev),
			       ref_and_mask, ref_and_mask, 0x20);
}

static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				       struct amdgpu_job *job,
				       struct amdgpu_ib *ib,
				       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (flags & AMDGPU_IB_PREEMPTED)
			control |= INDIRECT_BUFFER_PRE_RESUME(1);

		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
			gfx_v10_0_ring_emit_de_meta(ring,
						    flags & AMDGPU_IB_PREEMPTED);
	}

	amdgpu_ring_write(ring, header);
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib,
					   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

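	/* Currently, there is a high possibility to get wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */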
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				      u64 seq, unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

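	/* Interrupts do not work correctly on the GFX10.1 model yet; use the fallback instead */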
	if (adev->pdev->device == 0x50)
		int_sel = false;

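	/* RELEASE_MEM - flush caches, send int */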
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
				 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));

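	/*
	 * the address should be Qword aligned if 64bit write, Dword
	 * aligned if only send 32bit data low (discard data high)
	 */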
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v10_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v10_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
			       upper_32_bits(addr), seq, 0xffffffff, 4);
}

static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}

static void gfx_v10_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					  u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_mcbp)
		gfx_v10_0_ring_emit_ce_meta(ring,
					    flags & AMDGPU_IB_PREEMPTED);

	gfx_v10_0_ring_emit_tmz(ring, true);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time preamble presented
		 * although there is no context switch happening.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0);
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* dummy DW count, patched later */

	return ret;
}

static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}

static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size))
		return -ENOMEM;

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* assert IB preemption, emit the trailing fence */
	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
				   ring->trail_fence_gpu_addr,
				   ++ring->trail_seq);
	amdgpu_ring_commit(kiq_ring);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
	}

	/* deassert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_ce_ib_state ce_payload = {0};
	uint64_t csa_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
	csa_addr = amdgpu_csa_vaddr(ring->adev);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr +
			      offsetof(struct v10_gfx_meta_data, ce_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr +
			      offsetof(struct v10_gfx_meta_data, ce_payload)));

	if (resume)
		amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
					   offsetof(struct v10_gfx_meta_data,
						    ce_payload),
					   sizeof(ce_payload) >> 2);
	else
		amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
					   sizeof(ce_payload) >> 2);
}

static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = ALIGN(csa_addr + AMDGPU_CSA_SIZE - adev->gds.gds_size,
			 PAGE_SIZE);
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr +
			      offsetof(struct v10_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr +
			      offsetof(struct v10_gfx_meta_data, de_payload)));

	if (resume)
		amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
					   offsetof(struct v10_gfx_meta_data,
						    de_payload),
					   sizeof(de_payload) >> 2);
	else
		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
					   sizeof(de_payload) >> 2);
}

static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
}

static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
				      uint32_t me, uint32_t pipe,
				      enum amdgpu_interrupt_state state)
{
	uint32_t cp_int_cntl, cp_int_cntl_reg;

	if (!me) {
		switch (pipe) {
		case 0:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
			break;
		case 1:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 0);
		WREG32(cp_int_cntl_reg, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 1);
		WREG32(cp_int_cntl_reg, cp_int_cntl);
		break;
	default:
		break;
	}
}

static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						      int me, int pipe,
						      enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

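	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */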
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
		break;
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
		gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v10_0_eop_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		if (pipe_id == 0)
			amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		else
			amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* The interrupt can only be enabled/disabled per pipe
			 * instead of per queue, so match the queue here.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned type,
					       enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
	}
}

static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v10_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v10_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	if (ring->me == 1)
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
	else
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32(target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32(target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only supports AMDGPU_CP_KIQ_IRQ_DRIVER0 for now */
		break;
	}
	return 0;
}

static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;
	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
		  me_id, pipe_id, queue_id);

	amdgpu_fence_process(ring);
	return 0;
}

static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
	.name = "gfx_v10_0",
	.early_init = gfx_v10_0_early_init,
	.late_init = gfx_v10_0_late_init,
	.sw_init = gfx_v10_0_sw_init,
	.sw_fini = gfx_v10_0_sw_fini,
	.hw_init = gfx_v10_0_hw_init,
	.hw_fini = gfx_v10_0_hw_fini,
	.suspend = gfx_v10_0_suspend,
	.resume = gfx_v10_0_resume,
	.is_idle = gfx_v10_0_is_idle,
	.wait_for_idle = gfx_v10_0_wait_for_idle,
	.soft_reset = gfx_v10_0_soft_reset,
	.set_clockgating_state = gfx_v10_0_set_clockgating_state,
	.set_powergating_state = gfx_v10_0_set_powergating_state,
	.get_clockgating_state = gfx_v10_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v10_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v10_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v10_0_ring_set_wptr_gfx,
	.emit_frame_size =
		5 + /* COND_EXEC */
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jumps to the place
		     * just prior to this double SWITCH_BUFFER
		     */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v10_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
	.test_ring = gfx_v10_0_ring_test_ring,
	.test_ib = gfx_v10_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v10_0_ring_emit_sb,
	.emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
	.preempt_ib = gfx_v10_0_ring_preempt_ib,
	.emit_tmz = gfx_v10_0_ring_emit_tmz,
	.emit_wreg = gfx_v10_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
};

static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v10_0_ring_get_rptr_compute,
	.get_wptr = gfx_v10_0_ring_get_wptr_compute,
	.set_wptr = gfx_v10_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v10_0_ring_emit_gds_switch */
		7 + /* gfx_v10_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v10_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v10_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
	.emit_ib = gfx_v10_0_ring_emit_ib_compute,
	.emit_fence = gfx_v10_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
	.test_ring = gfx_v10_0_ring_test_ring,
	.test_ib = gfx_v10_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v10_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
};

static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v10_0_ring_get_rptr_compute,
	.get_wptr = gfx_v10_0_ring_get_wptr_compute,
	.set_wptr = gfx_v10_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v10_0_ring_emit_gds_switch */
		7 + /* gfx_v10_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v10_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v10_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
	.emit_ib = gfx_v10_0_ring_emit_ib_compute,
	.emit_fence = gfx_v10_0_ring_emit_fence_kiq,
	.test_ring = gfx_v10_0_ring_test_ring,
	.test_ib = gfx_v10_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v10_0_ring_emit_rreg,
	.emit_wreg = gfx_v10_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
};

static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v10_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v10_0_eop_irq_funcs = {
	.set = gfx_v10_0_set_eop_interrupt_state,
	.process = gfx_v10_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
	.set = gfx_v10_0_set_priv_reg_fault_state,
	.process = gfx_v10_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
	.set = gfx_v10_0_set_priv_inst_fault_state,
	.process = gfx_v10_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v10_0_kiq_irq_funcs = {
	.set = gfx_v10_0_kiq_set_interrupt_state,
	.process = gfx_v10_0_kiq_irq,
};

static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;

	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
	adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
}

static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
		break;
	default:
		break;
	}
}

static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	default:
		adev->gds.gds_size = 0x10000;
		adev->gds.gds_compute_max_wave_id = 0x4ff;
		break;
	}

	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}

static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if there is one WGP enabled, it means 2 CUs are enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}

static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v10_0_ip_funcs,
};