/*
 * Contiguous Memory Allocator
 *
 * Core of the Contiguous Memory Allocator (CMA): physically contiguous
 * regions are carved out of memblock early at boot, kept as MIGRATE_CMA
 * pageblocks, and handed out later via cma_alloc()/cma_release().
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>	/* for kmemleak_ignore_phys() below */
#include <trace/events/cma.h>

#include "cma.h"
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
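
/*
 * Worked example (illustrative, not from the original source): with
 * order_per_bit == 0 and align_order == 2, the mask is 0b11, so
 * bitmap_find_next_zero_area_off() in cma_alloc() will only return bit
 * numbers that are multiples of 4, i.e. 4-page-aligned PFN offsets.
 */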

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}
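
/*
 * Worked example (illustrative, not from the original source): an area
 * with base_pfn == 1028, order_per_bit == 0 and align_order == 9 starts
 * ALIGN(1028, 512) - 1028 == 508 pages before its first 512-page
 * boundary, so bit 508 is the first bit an aligned search may return.
 */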

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
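
/*
 * Worked example (illustrative, not from the original source): with
 * order_per_bit == 1 each bit covers two pages, so a 5-page request
 * rounds up to ALIGN(5, 2) >> 1 == 3 bits in the bitmap.
 */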

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Keep this
			 * simple by forcing the entire CMA reserved
			 * range to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap
 * @res_cma: Pointer to store the created cma region
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
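
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that has already placed a suitably aligned region on memblock's reserved
 * list, e.g. from a device-tree "reserved-memory" handler, could wrap it
 * as a CMA area like this. "rmem_base", "rmem_size" and "my_cma" are
 * hypothetical names.
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_rmem_setup(phys_addr_t rmem_base,
 *					phys_addr_t rmem_size)
 *	{
 *		// region must already be memblock_reserve()d and aligned
 *		// to the MAX_ORDER/pageblock alignment checked above
 *		return cma_init_reserved_mem(rmem_base, rmem_size, 0,
 *					     &my_cma);
 *	}
 */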

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any)
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows to create custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate.  But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it.  On x86 there is a validation check
	 * for this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable pages when the area sits in the middle of the physical
	 * memory range, so force whole MAX_ORDER/pageblock alignment.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify
	 * further checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same
		 * zone. If the requested region crosses the low/high memory
		 * boundary, try allocating from high memory first and fall
		 * back to low memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
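
/*
 * Example (illustrative sketch, not part of the original file): arch or
 * early-boot code could carve out a 16 MiB area anywhere in memory roughly
 * like this. "my_cma" is a hypothetical pointer declared by the caller.
 *
 *	static struct cma *my_cma;
 *
 *	static void __init my_reserve(void)
 *	{
 *		// base = 0, limit = 0: let memblock pick the placement;
 *		// fixed = false since we did not pin the base address
 *		if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
 *					   &my_cma))
 *			pr_warn("my_cma reservation failed\n");
 *	}
 */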

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region
		 * for our exclusive use. If the migration fails we will take
		 * the lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
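
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * holding a region in the hypothetical "my_cma" could grab a 1 MiB buffer
 * (256 pages with 4 KiB pages), aligned to its own size:
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, 256, get_order(SZ_1M));
 *	if (!page)
 *		return -ENOMEM;
 */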

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area
 * and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
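
/*
 * Example (illustrative sketch, not part of the original file): releasing
 * the 256-page buffer obtained in the cma_alloc() example above. The
 * return value is worth checking, since it flags pages that do not belong
 * to the area:
 *
 *	if (!cma_release(my_cma, page, 256))
 *		pr_warn("buffer did not belong to my_cma\n");
 */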