/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

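/*
 * Bitmap mask for an allocation aligned to align_order: the low-order
 * bits that must be clear in the starting bit number, given that one
 * bitmap bit covers 2^order_per_bit pages.
 */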
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find a PFN aligned to the specified order and return an offset
 * represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

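/* Convert a page count to a bitmap bit count, rounding up. */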
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

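/* Mark the bitmap bits backing @count pages starting at @pfn as free. */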
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

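/*
 * Allocate the area's allocation bitmap and release the reserved
 * pageblocks to the buddy allocator as MIGRATE_CMA pages, verifying
 * that the whole range sits within a single zone.
 */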
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

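/* Activate all areas declared during early boot (runs as a core initcall). */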
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate.  But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it.  On x86 there is a validation check
	 * for this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages both ends in CMA area could be merged into adjacent unmovable
	 * migratetype page by page allocator's buddy algorithm. In the case,
	 * you couldn't get a contiguous memory, which is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify
	 * further checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

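	/*
	 * Retry loop: grab a free bitmap range, mark it as in use, then try
	 * to actually allocate the pages. On -EBUSY release the range and
	 * continue searching right after it.
	 */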
	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
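		/*
		 * cma_mutex serializes alloc_contig_range() so that concurrent
		 * CMA allocations cannot race while isolating pageblocks.
		 */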
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

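/*
 * Illustrative usage sketch, not part of this file; the names below are
 * hypothetical. A caller that declared an area at early boot:
 *
 *	struct cma *my_cma;
 *	cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false, &my_cma);
 *
 * can later allocate and release a contiguous page range from it:
 *
 *	struct page *page = cma_alloc(my_cma, 16, get_order(SZ_64K));
 *	if (page)
 *		cma_release(my_cma, page, 16);
 */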