/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/io-64-nonatomic-lo-hi.h>

#include "amdgpu.h"

/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: level in the page-directory hierarchy
 * @addr: resulting address
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (page directory entry).
 */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_dma_tt *ttm;

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
		*addr = ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}

	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}
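
/*
 * Illustrative usage sketch, not a caller that exists in this file: a VM
 * page-directory update would typically combine the returned address and
 * flag bits into the final hardware PDE value, roughly:
 *
 *	uint64_t pde, flags;
 *
 *	amdgpu_gmc_get_pde_for_bo(child_bo, level + 1, &pde, &flags);
 *	pde |= flags;
 *
 * "child_bo" and "level" here are placeholders for the caller's context.
 */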

/**
 * amdgpu_gmc_pd_addr - return the address of the root page directory
 *
 * @bo: the BO holding the root page directory
 */
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* Vega10 and newer encode the PDE flags in the root address */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}
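
/*
 * Worked example with hypothetical numbers: on a Vega10-class ASIC with
 * the root directory in VRAM at GPU offset 0x400000, amdgpu_gmc_pd_addr()
 * returns that offset with AMDGPU_PTE_VALID (plus whatever bits
 * amdgpu_gmc_get_vm_pde() adds for level -1) OR'ed in; pre-Vega parts get
 * just the plain GPU offset.
 */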

/**
 * amdgpu_gmc_set_pte_pde - update a page table entry using the CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: CPU address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: destination address to write into the pte/pde
 * @flags: access flags
 *
 * Update a page table entry using the CPU.
 */
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * The following is for PTEs only. The GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}
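
/*
 * Worked example with hypothetical numbers: for addr = 0x1234567000 and
 * flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM, the mask keeps address
 * bits [47:12], so value = 0x1234567000 | flags, written as one 64-bit
 * entry at byte offset gpu_page_idx * 8 into the page table.
 */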

/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in the GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 */
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_dma_tt *ttm;

	/* Only single, uncached pages can go through the AGP aperture */
	if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);

	/* The DMA address must lie inside the aperture's window */
	if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + ttm->dma_address[0];
}
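
/*
 * Worked example with hypothetical numbers: with agp_start = 0xFFE0000000
 * and agp_size = 0x20000000 (512MB), a one-page BO at dma_address[0] =
 * 0x1000 maps to 0xFFE0001000; a page at 0x1FFFF000 or higher fails the
 * range check and yields AMDGPU_BO_INVALID_OFFSET.
 */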

/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

	/* Without XGMI the FB aperture simply mirrors local VRAM */
	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
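
/*
 * Worked example with hypothetical numbers: base = 0 and mc_vram_size =
 * 8GB give vram_start = 0x0 and vram_end = 0x1FFFFFFFF; booting with
 * amdgpu.vram_limit=4096 additionally caps real_vram_size at 4GB while
 * the MC window itself stays 8GB wide.
 */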

/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place the GART before or after VRAM.
 *
 * If the GART size is bigger than the space left, we adjust the GART size.
 * Thus this function will never fail.
 */
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	mc->gart_size += adev->pm.smu_prv_buffer_size;

	/* VCE doesn't like it when BOs cross a 4GB segment, so align
	 * the GART base on a 4GB boundary as well.
	 */
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
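
/*
 * Worked example with hypothetical numbers: with the FB at 0x0 -
 * 0x1FFFFFFFF (8GB at the bottom of the MC space), size_bf is 0 and all
 * the room lies above the FB, so a 512MB GART lands at the top:
 * gart_start = max_mc_address - 0x20000000 + 1, then rounded down to a
 * 4GB boundary by the mask above.
 */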

/**
 * amdgpu_gmc_agp_location - try to find the AGP location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to find a place for the AGP BAR in the MC address
 * space.
 *
 * The AGP BAR will be assigned the largest available hole in the address
 * space. Should be called after VRAM and GART locations are set up.
 */
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

	/* The AGP aperture is not used under SR-IOV */
	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

	/* Measure the 16GB-aligned holes before and after the FB */
	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}
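
/*
 * Worked example with hypothetical numbers: with the FB at 0x0 -
 * 0x1FFFFFFFF (8GB) and the GART at the top of the MC space, the largest
 * hole sits directly above the FB, so agp_start = ALIGN(fb_end + 1, 16GB)
 * = 0x400000000 and the aperture runs up to the 16GB-aligned start of the
 * GART window.
 */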

/**
 * amdgpu_gmc_filter_faults - filter out recently handled faults
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t stamp, key = addr << 4 | pasid;
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (fault->key == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	fault->key = key;
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}
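
/*
 * Worked example with hypothetical numbers: a fault at page address
 * 0x12345 from PASID 0x8 is tracked under key = (0x12345 << 4) | 0x8 =
 * 0x123458; a second fault with the same key, arriving while the ring
 * entry is still younger than AMDGPU_GMC_FAULT_TIMEOUT, returns true and
 * is dropped by the caller as already handled.
 */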