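// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator (CMA)
 *
 * Reserves large physically contiguous regions from the early allocator
 * and hands out contiguous page ranges from them at runtime by migrating
 * movable pages out of the way (see cma_alloc() and cma_release() below).
 */
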
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

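/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */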
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap) {
		cma->count = 0;
		return -ENOMEM;
	}

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
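			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */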
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

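/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved
 * memory.
 */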
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

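	/* Sanity checks */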
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

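	/* Ensure minimal alignment required by the mm core */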
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

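	/* The alignment must cover a whole number of bitmap bits (order_per_bit) */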
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

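	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */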
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

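/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area.
 * @name: The name of the area. See cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated or
 * reserved memory. It allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 *
 * An illustrative sketch (not taken from any particular caller): early arch
 * setup code might reserve 16 MiB anywhere in memory with
 *
 *	struct cma *cma;
 *	cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, "example", &cma);
 */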
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

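	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */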
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa, alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

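	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could otherwise be merged into
	 * adjacent unmovable pageblocks by the buddy allocator, so the area
	 * must be aligned to at least MAX_ORDER-1 and pageblock_order pages.
	 */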
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

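	/* The size must cover a whole number of bitmap bits (order_per_bit) */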
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

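	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */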
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

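	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */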
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

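	/* Reserve memory */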
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

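		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */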
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_phys_alloc_range(size, alignment,
							 highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_phys_alloc_range(size, alignment, base,
							 limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

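		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */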
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

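/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */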
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
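		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */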
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

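	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */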
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

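/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 *
 * An illustrative pairing with cma_alloc() (a sketch only; callers usually
 * obtain the struct cma pointer via the DMA layer):
 *
 *	struct page *page = cma_alloc(cma, nr_pages, 0, false);
 *
 *	... use the pages ...
 *
 *	cma_release(cma, page, nr_pages);
 */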
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}