1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#ifndef __AMDGPU_GMC_H__
27#define __AMDGPU_GMC_H__
28
29#include <linux/types.h>
30
31#include "amdgpu_irq.h"
32
33
/*
 * The MC (memory controller) address space contains a "hole" of
 * non-canonical addresses: everything in
 * [AMDGPU_GMC_HOLE_START, AMDGPU_GMC_HOLE_END) is invalid.  Valid
 * addresses at or above the hole start must carry sign-extended upper
 * bits (see amdgpu_gmc_sign_extend()).
 */
#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END 0xffff800000000000ULL

/*
 * Strips the sign-extension bits, keeping only the low 48 address
 * bits of an MC address.
 */
#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL

/*
 * Size (as a power of two) of the ring buffer of recently handled GPU
 * faults: 2^8 = 256 entries (see struct amdgpu_gmc.fault_ring).
 */
#define AMDGPU_GMC_FAULT_RING_ORDER 8
#define AMDGPU_GMC_FAULT_RING_SIZE (1 << AMDGPU_GMC_FAULT_RING_ORDER)

/*
 * Size (as a power of two) of the hash table used to look up recently
 * handled faults: 2^8 = 256 buckets (see struct amdgpu_gmc.fault_hash).
 */
#define AMDGPU_GMC_FAULT_HASH_ORDER 8
#define AMDGPU_GMC_FAULT_HASH_SIZE (1 << AMDGPU_GMC_FAULT_HASH_ORDER)

/*
 * How long a recorded fault stays "recent" for duplicate filtering in
 * amdgpu_gmc_filter_faults().
 * NOTE(review): units are interrupt-handler timestamp ticks, not a
 * wall-clock unit — confirm against the IH timestamp source.
 */
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL

/* Forward declaration; full definition lives in linux/firmware.h. */
struct firmware;
64
65
66
67
/*
 * One entry in the ring of recently handled GPU faults, used by
 * amdgpu_gmc_filter_faults() to suppress duplicates.
 */
struct amdgpu_gmc_fault {
	uint64_t timestamp;	/* time the fault was recorded (IH timestamp) */
	/* index of the next ring entry hashing to the same bucket */
	uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER;
	uint64_t key:52;	/* 52-bit key identifying the fault */
};
73
74
75
76
/*
 * Per-VM-hub register offsets used by the GMC code to program page
 * table bases, issue TLB invalidations and query protection faults.
 * NOTE(review): field names suggest these are MMIO register offsets
 * filled in per ASIC — confirm against the gmc_v*_0.c setup code.
 */
struct amdgpu_vmhub {
	uint32_t	ctx0_ptb_addr_lo32;	/* context 0 page table base, low 32 bits */
	uint32_t	ctx0_ptb_addr_hi32;	/* context 0 page table base, high 32 bits */
	uint32_t	vm_inv_eng0_sem;	/* invalidation engine 0 semaphore */
	uint32_t	vm_inv_eng0_req;	/* invalidation engine 0 request */
	uint32_t	vm_inv_eng0_ack;	/* invalidation engine 0 acknowledge */
	uint32_t	vm_context0_cntl;	/* VM context 0 control */
	uint32_t	vm_l2_pro_fault_status;	/* L2 protection fault status */
	uint32_t	vm_l2_pro_fault_cntl;	/* L2 protection fault control */
};
87
88
89
90
/*
 * ASIC-specific GMC callbacks, dispatched through the
 * amdgpu_gmc_*() wrapper macros below.
 */
struct amdgpu_gmc_funcs {
	/* flush the TLB for the given vmid on one vmhub */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
				uint32_t vmhub, uint32_t flush_type);
	/* flush the TLB for a pasid; all_hub selects flushing every hub */
	int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
					uint32_t flush_type, bool all_hub);
	/* emit a TLB flush on a ring; returns the (possibly updated) pd_addr */
	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
				       uint64_t pd_addr);
	/* emit a vmid -> pasid mapping update on a ring */
	void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
				   unsigned pasid);
	/* enable/disable PRT (partially resident texture) support */
	void (*set_prt)(struct amdgpu_device *adev, bool enable);
	/* map generic mtype flags to the hardware-specific PTE flag bits */
	uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
	/* adjust a PDE address and flags for the given page table level */
	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
			   u64 *dst, u64 *flags);
	/* compute the hardware PTE flags for a BO VA mapping */
	void (*get_vm_pte)(struct amdgpu_device *adev,
			   struct amdgpu_bo_va_mapping *mapping,
			   uint64_t *flags);
};
116
/*
 * Per-device XGMI (inter-GPU link) topology state.
 * NOTE(review): the semantics below are inferred from field names —
 * confirm against amdgpu_xgmi.c.
 */
struct amdgpu_xgmi {
	/* this device's node id within the hive */
	u64 node_id;
	/* id shared by all devices in the same hive */
	u64 hive_id;
	/* size of the per-node FB segment in the shared address space */
	u64 node_segment_size;
	/* physical index of this node in the hive */
	unsigned physical_node_id;
	/* number of physical nodes present in the hive */
	unsigned num_physical_nodes;
	/* list entry linking devices of the same hive */
	struct list_head head;
	bool supported;			/* XGMI is available on this device */
	struct ras_common_if *ras_if;	/* RAS state for the XGMI block */
};
132
/*
 * Per-device memory-controller state: CPU-visible apertures, the
 * VRAM/GART/AGP layout inside the MC address space, fault filtering
 * state, and the ASIC-specific callback table.
 */
struct amdgpu_gmc {
	/*
	 * PCI aperture (VRAM BAR) through which the CPU accesses VRAM.
	 */
	resource_size_t aper_size;
	resource_size_t aper_base;
	/* total VRAM as seen by the MC, and the CPU-visible portion */
	u64 mc_vram_size;
	u64 visible_vram_size;
	/*
	 * AGP aperture inside the MC address space; placed by
	 * amdgpu_gmc_agp_location().
	 * NOTE(review): presumably maps system memory pages directly —
	 * confirm against amdgpu_gmc_agp_addr().
	 */
	u64 agp_size;
	u64 agp_start;
	u64 agp_end;
	/*
	 * GART aperture inside the MC address space; placed by
	 * amdgpu_gmc_gart_location().
	 */
	u64 gart_size;
	u64 gart_start;
	u64 gart_end;
	/*
	 * VRAM aperture inside the MC address space; placed by
	 * amdgpu_gmc_vram_location().
	 */
	u64 vram_start;
	u64 vram_end;
	/*
	 * Frame buffer window in the MC address space.
	 * NOTE(review): may differ from [vram_start, vram_end] on XGMI
	 * configurations where the window spans multiple nodes — confirm.
	 */
	u64 fb_start;
	u64 fb_end;
	unsigned vram_width;		/* VRAM bus width */
	u64 real_vram_size;		/* actual VRAM size */
	int vram_mtrr;			/* MTRR covering the aperture, if any */
	u64 mc_mask;			/* mask of usable MC address bits */
	const struct firmware *fw;	/* MC firmware */
	uint32_t fw_version;		/* MC firmware version */
	struct amdgpu_irq_src vm_fault;	/* VM fault interrupt source */
	uint32_t vram_type;		/* VRAM type identifier */
	uint8_t vram_vendor;		/* VRAM vendor identifier */
	uint32_t srbm_soft_reset;	/* pending SRBM soft reset bits */
	bool prt_warning;		/* PRT warning already issued */
	uint64_t stolen_size;		/* VRAM reserved/stolen at boot */
	uint32_t sdpif_register;	/* saved SDPIF register value */
	/* apertures — shared with and private to this GPU's VM space */
	u64 shared_aperture_start;
	u64 shared_aperture_end;
	u64 private_aperture_start;
	u64 private_aperture_end;
	/* protects concurrent TLB invalidation */
	spinlock_t invalidate_lock;
	bool translate_further;		/* enable a further page-table translation level */
	struct kfd_vm_fault_info *vm_fault_info;	/* fault info exported to KFD */
	atomic_t vm_fault_info_updated;			/* flag: vm_fault_info holds new data */

	/* ring + hash of recently handled faults, for duplicate filtering */
	struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE];
	struct {
		uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER;
	} fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
	uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;	/* next ring slot to use */

	bool tmz_enabled;		/* trusted memory zone enabled */

	const struct amdgpu_gmc_funcs *gmc_funcs;	/* ASIC-specific callbacks */

	struct amdgpu_xgmi xgmi;		/* XGMI topology state */
	struct amdgpu_irq_src ecc_irq;		/* ECC error interrupt source */
};
223
/*
 * Convenience wrappers dispatching through the ASIC-specific
 * amdgpu_gmc_funcs table.  These are function-like macros: arguments
 * may be evaluated more than once, so avoid side effects in them.
 */
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
	((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
	((adev), (pasid), (type), (allhub)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
233
234
235
236
237
238
239
240
241
242static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
243{
244 WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
245
246 return (gmc->real_vram_size == gmc->visible_vram_size);
247}
248
249
250
251
252
253
254static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
255{
256 if (addr >= AMDGPU_GMC_HOLE_START)
257 addr |= AMDGPU_GMC_HOLE_END;
258
259 return addr;
260}
261
/* GMC helpers implemented in amdgpu_gmc.c */

/* fill the PDE address and flags for a page-table BO at @level */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags);
/* write one PTE into a CPU-mapped page table */
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags);
/* get the page-directory base address of a BO's VM */
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
/* map a TTM BO through the AGP aperture; see amdgpu_gmc.agp_* */
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
/* place the VRAM aperture at @base in the MC address space */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base);
/* place the GART aperture in the MC address space */
void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
			      struct amdgpu_gmc *mc);
/* place the AGP aperture in the MC address space */
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
			     struct amdgpu_gmc *mc);
/* returns true if the fault was seen recently and should be ignored */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp);
/* RAS (reliability/availability/serviceability) setup and teardown */
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
/* assign TLB invalidation engines to the rings */
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);

/* decide whether to enable TMZ (trusted memory zone) for this device */
extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
282
283#endif
284