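/*
 * Contiguous Memory Allocator
 */
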
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

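/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bit units.
 */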
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
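			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Simply
			 * scanning in other zones presents pages from
			 * a different node, so disallow them here.
			 */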
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

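/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region
 *
 * This function creates custom contiguous area from already reserved memory.
 */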
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

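	/* Sanity checks */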
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

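	/* ensure minimal alignment required by mm core */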
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

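	/* alignment should be aligned with order_per_bit */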
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

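	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */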
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		/* the name is printed in log messages, so no trailing newline */
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

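/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any)
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap
 * @fixed: if true, reserve the area at exactly @base; if false, reserve
 *         anywhere in the range from @base to @limit
 * @name: The name of the area. See cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock) has
 * been activated and all other subsystems have already allocated/reserved
 * memory.
 *
 * A minimal usage sketch (illustrative only, from arch early-boot code):
 *
 *	struct cma *cma;
 *	int ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					 "example", &cma);
 */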
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

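	/*
	 * We can't use __pa(high_memory) directly, since high_memory isn't
	 * a valid direct map VA, and DEBUG_VIRTUAL will (validly) complain
	 * about it, so derive the boundary from the last valid address.
	 */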
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

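	/*
	 * Sanitise input arguments. Pages at both ends of the CMA area could
	 * otherwise be merged into adjacent unmovable pageblocks and break
	 * their page state, so the area must be aligned to at least
	 * MAX_ORDER/pageblock boundaries.
	 */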
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

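	/* size should be aligned with order_per_bit */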
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

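	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */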
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

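	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */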
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

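	/* Reserve memory */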
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

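		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */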
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

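		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */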
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		/* each bitmap bit covers 1 << order_per_bit pages */
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

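/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the allocation.
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area. A typical pairing (illustrative only):
 *
 *	struct page *page = cma_alloc(cma, nr_pages, 0, GFP_KERNEL);
 *	...
 *	cma_release(cma, page, nr_pages);
 */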
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
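		/*
		 * It's safe to drop the lock here. We've marked this region
		 * for our exclusive use. If the migration fails we will take
		 * the lock again and unmark it.
		 */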
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

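/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */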
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}