/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>	/* kmemleak_ignore_phys() */
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
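
/*
 * Worked example (illustrative numbers, not from the original source):
 * with order_per_bit == 0 and align_order == 4, the returned mask is
 * (1UL << 4) - 1 == 0xf, so the bitmap search below must land on an
 * index whose low four bits are clear, i.e. a 16-page aligned position.
 */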

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
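
/*
 * Worked example (illustrative, not from the original source): with
 * order_per_bit == 2 each bitmap bit covers 4 pages, so a request for
 * pages == 5 rounds up to ALIGN(5, 4) == 8 pages and therefore occupies
 * 8 >> 2 == 2 bits in the bitmap.
 */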

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		/* No trailing newline: the name ends up in debugfs paths. */
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
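
/*
 * Usage sketch (hypothetical caller, not part of this file): early boot
 * code that has already reserved a region via memblock can hand it over
 * to CMA. "mydev_base" and "mydev_size" are assumed, illustrative values.
 *
 *	struct cma *cma;
 *	int ret;
 *
 *	ret = cma_init_reserved_mem(mydev_base, mydev_size, 0,
 *				    "mydev", &cma);
 *	if (ret)
 *		pr_err("mydev: CMA init failed: %d\n", ret);
 */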

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area, optional (use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory, optional (0 for any)
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows to create custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages both ends in CMA area could be merged into adjacent unmovable
	 * migratetype page by page allocator's buddy algorithm. In the case,
	 * you couldn't get a contiguous memory, which is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
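
/*
 * Usage sketch (hypothetical arch setup code, not part of this file):
 * reserve a 64 MiB area anywhere below 4 GiB once memblock is up. The
 * limit value is an assumption chosen for the example.
 *
 *	struct cma *cma;
 *	int ret;
 *
 *	ret = cma_declare_contiguous(0, SZ_64M, SZ_4G, 0, 0,
 *				     false, "mydev", &cma);
 *	if (ret)
 *		pr_warn("mydev: CMA reservation failed: %d\n", ret);
 */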

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the migration/compaction step.
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret) {
		pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
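
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * typical allocate/use/free cycle against an already activated area.
 * The counts and alignment are illustrative.
 *
 *	struct page *page;
 *
 *	page = cma_alloc(cma, 16, 4, GFP_KERNEL);  (16 pages, 2^4-page aligned)
 *	if (!page)
 *		return -ENOMEM;
 *	... use the 16 physically contiguous pages ...
 *	cma_release(cma, page, 16);
 */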

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
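
/*
 * Example iterator callback (hypothetical, for illustration only):
 *
 *	static int cma_print_one(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;	(returning non-zero stops the iteration)
 *	}
 *
 *	cma_for_each_area(cma_print_one, NULL);
 */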