#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* fallback for architectures that do not provide ioremap_cache() */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
		return __va(offset);
	return NULL; /* fallback to arch_memremap_wb() */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT and MEMREMAP_WC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the
 * different mapping types will be attempted in the order listed below
 * until one of them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead
 * return a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the cpu's write buffers), but is otherwise
 * weakly ordered.  Attempts to map System RAM with this mapping
 * type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
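
/*
 * Example usage (illustrative sketch, not part of this file): map a
 * hypothetical firmware table that has no I/O side effects and read it
 * through ordinary loads.  The physical address and size are made up.
 *
 *	void *tbl = memremap(0x80000000, SZ_4K, MEMREMAP_WB);
 *
 *	if (tbl) {
 *		u32 signature = *(u32 *)tbl;
 *
 *		... parse the table via normal loads and stores ...
 *		memunmap(tbl);
 *	}
 */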

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_memremap - memremap() with automatic unmap on device detach
 * @dev: device that manages the lifetime of the mapping
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT and MEMREMAP_WC
 *
 * Returns the remapped address, or an ERR_PTR() on failure.
 */
void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
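
/*
 * Example usage (illustrative sketch): the devm_ variants tie the mapping
 * to the device, so the error and remove paths need no explicit
 * memunmap().  The platform device and resource here are hypothetical.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void *base;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *
 *		base = devm_memremap(&pdev->dev, res->start,
 *				resource_size(res), MEMREMAP_WB);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *
 *		... use base; it is unmapped automatically on detach ...
 *		return 0;
 *	}
 */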

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

void get_zone_device_page(struct page *page)
{
	percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

void put_zone_device_page(struct page *page)
{
	put_dev_pagemap(page->pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);

static void pgmap_radix_release(struct resource *res)
{
	resource_size_t key, align_start, align_size, align_end;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	mutex_lock(&pgmap_lock);
	for (key = align_start; key <= align_end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	arch_remove_memory(align_start, align_size);
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}
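
/*
 * Example usage (illustrative sketch): look up the pgmap for a physical
 * address under rcu_read_lock(), and pin it with its percpu ref if it
 * must outlive the RCU critical section.  "phys" is hypothetical.
 *
 *	struct dev_pagemap *pgmap;
 *
 *	rcu_read_lock();
 *	pgmap = find_dev_pagemap(phys);
 *	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
 *		pgmap = NULL;
 *	rcu_read_unlock();
 */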

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and is expected to be killed before
 *    the devres release fires; the release path warns if the mapping is
 *    still live at teardown.
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key, align_start, align_size, align_end;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	error = arch_add_memory(nid, align_start, align_size, true);
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list.  Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
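
/*
 * Example usage (illustrative sketch): a pmem-style driver handing its
 * aperture to devm_memremap_pages() so the range gets struct pages and
 * pfn_to_page() works for it.  Setup of "res" and of the percpu_ref
 * "ref" (including its kill on shutdown) is elided and assumed.
 *
 *	void *addr = devm_memremap_pages(dev, res, ref, NULL);
 *
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * On success @addr is the kernel virtual address of res->start, and every
 * pfn in the (section aligned) range has a struct page with ->pgmap set.
 */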

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address of the first struct page
	 * in this range of the vmemmap array, so converting it back to a
	 * struct page pointer and then to a physical address is enough
	 * to key the dev_pagemap lookup.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Look up the dev_pagemap, if any, covering this memmap range;
	 * ranges not established via devm_memremap_pages() come back
	 * empty and return NULL (no altmap).
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */