1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
29
30
31
32
33
34
35
36
37void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
38{
39 if (adev->gfx.rlc.in_safe_mode)
40 return;
41
42
43 if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
44 return;
45
46 if (adev->cg_flags &
47 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
48 AMD_CG_SUPPORT_GFX_3D_CGCG)) {
49 adev->gfx.rlc.funcs->set_safe_mode(adev);
50 adev->gfx.rlc.in_safe_mode = true;
51 }
52}
53
54
55
56
57
58
59
60
61void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
62{
63 if (!(adev->gfx.rlc.in_safe_mode))
64 return;
65
66
67 if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
68 return;
69
70 if (adev->cg_flags &
71 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
72 AMD_CG_SUPPORT_GFX_3D_CGCG)) {
73 adev->gfx.rlc.funcs->unset_safe_mode(adev);
74 adev->gfx.rlc.in_safe_mode = false;
75 }
76}
77
78
79
80
81
82
83
84
85
86
87int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
88{
89 const u32 *src_ptr;
90 volatile u32 *dst_ptr;
91 u32 i;
92 int r;
93
94
95 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
96 AMDGPU_GEM_DOMAIN_VRAM,
97 &adev->gfx.rlc.save_restore_obj,
98 &adev->gfx.rlc.save_restore_gpu_addr,
99 (void **)&adev->gfx.rlc.sr_ptr);
100 if (r) {
101 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
102 amdgpu_gfx_rlc_fini(adev);
103 return r;
104 }
105
106
107 src_ptr = adev->gfx.rlc.reg_list;
108 dst_ptr = adev->gfx.rlc.sr_ptr;
109 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
110 dst_ptr[i] = cpu_to_le32(src_ptr[i]);
111 amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
112 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
113
114 return 0;
115}
116
117
118
119
120
121
122
123
124
125int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
126{
127 u32 dws;
128 int r;
129
130
131 adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
132 r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
133 AMDGPU_GEM_DOMAIN_VRAM,
134 &adev->gfx.rlc.clear_state_obj,
135 &adev->gfx.rlc.clear_state_gpu_addr,
136 (void **)&adev->gfx.rlc.cs_ptr);
137 if (r) {
138 dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
139 amdgpu_gfx_rlc_fini(adev);
140 return r;
141 }
142
143 return 0;
144}
145
146
147
148
149
150
151
152
153
154int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
155{
156 int r;
157
158 r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
159 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
160 &adev->gfx.rlc.cp_table_obj,
161 &adev->gfx.rlc.cp_table_gpu_addr,
162 (void **)&adev->gfx.rlc.cp_table_ptr);
163 if (r) {
164 dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
165 amdgpu_gfx_rlc_fini(adev);
166 return r;
167 }
168
169
170 amdgpu_gfx_rlc_setup_cp_table(adev);
171 amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
172 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
173
174 return 0;
175}
176
177
178
179
180
181
182
183
184void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
185{
186 const __le32 *fw_data;
187 volatile u32 *dst_ptr;
188 int me, i, max_me;
189 u32 bo_offset = 0;
190 u32 table_offset, table_size;
191
192 max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
193
194
195 dst_ptr = adev->gfx.rlc.cp_table_ptr;
196 for (me = 0; me < max_me; me++) {
197 if (me == 0) {
198 const struct gfx_firmware_header_v1_0 *hdr =
199 (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
200 fw_data = (const __le32 *)
201 (adev->gfx.ce_fw->data +
202 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
203 table_offset = le32_to_cpu(hdr->jt_offset);
204 table_size = le32_to_cpu(hdr->jt_size);
205 } else if (me == 1) {
206 const struct gfx_firmware_header_v1_0 *hdr =
207 (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
208 fw_data = (const __le32 *)
209 (adev->gfx.pfp_fw->data +
210 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
211 table_offset = le32_to_cpu(hdr->jt_offset);
212 table_size = le32_to_cpu(hdr->jt_size);
213 } else if (me == 2) {
214 const struct gfx_firmware_header_v1_0 *hdr =
215 (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
216 fw_data = (const __le32 *)
217 (adev->gfx.me_fw->data +
218 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
219 table_offset = le32_to_cpu(hdr->jt_offset);
220 table_size = le32_to_cpu(hdr->jt_size);
221 } else if (me == 3) {
222 const struct gfx_firmware_header_v1_0 *hdr =
223 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
224 fw_data = (const __le32 *)
225 (adev->gfx.mec_fw->data +
226 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
227 table_offset = le32_to_cpu(hdr->jt_offset);
228 table_size = le32_to_cpu(hdr->jt_size);
229 } else if (me == 4) {
230 const struct gfx_firmware_header_v1_0 *hdr =
231 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
232 fw_data = (const __le32 *)
233 (adev->gfx.mec2_fw->data +
234 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
235 table_offset = le32_to_cpu(hdr->jt_offset);
236 table_size = le32_to_cpu(hdr->jt_size);
237 }
238
239 for (i = 0; i < table_size; i ++) {
240 dst_ptr[bo_offset + i] =
241 cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
242 }
243
244 bo_offset += table_size;
245 }
246}
247
248
249
250
251
252
253
254
255
256void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
257{
258
259 if (adev->gfx.rlc.save_restore_obj) {
260 amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
261 &adev->gfx.rlc.save_restore_gpu_addr,
262 (void **)&adev->gfx.rlc.sr_ptr);
263 }
264
265
266 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
267 &adev->gfx.rlc.clear_state_gpu_addr,
268 (void **)&adev->gfx.rlc.cs_ptr);
269
270
271 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
272 &adev->gfx.rlc.cp_table_gpu_addr,
273 (void **)&adev->gfx.rlc.cp_table_ptr);
274}
275