// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"

#define NUM_ORDERS ARRAY_SIZE(orders)

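/*
 * High-order allocations are opportunistic: __GFP_NORETRY with
 * __GFP_RECLAIM cleared makes them fail fast rather than trigger reclaim
 * or the OOM killer, and __GFP_NOWARN suppresses the failure warning.
 * Order-0 allocations must succeed, so they keep the default reclaim
 * behaviour. With 4 KiB pages, orders 8, 4 and 0 correspond to 1 MiB,
 * 64 KiB and 4 KiB blocks.
 */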
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
                                     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;
static const unsigned int orders[] = {8, 4, 0};

static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

static inline unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

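/*
 * The system heap keeps one page pool per entry in orders[] for each
 * caching mode, so freed pages can be recycled instead of going back to
 * the page allocator on every allocation.
 */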
struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool *uncached_pools[NUM_ORDERS];
        struct ion_page_pool *cached_pools[NUM_ORDERS];
};

/*
 * Pages from the page pools have already been zeroed. Cached buffers
 * need a cache clean after allocation; uncached buffers have been
 * non-cached ever since they were allocated, so they need no further
 * cache maintenance.
 */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        bool cached = ion_buffer_cached(buffer);
        struct ion_page_pool *pool;
        struct page *page;

        if (!cached)
                pool = heap->uncached_pools[order_to_index(order)];
        else
                pool = heap->cached_pools[order_to_index(order)];

        page = ion_page_pool_alloc(pool);

        return page;
}

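/*
 * Return a page to its pool, or straight to the system when the shrinker
 * is draining the heap (ION_PRIV_FLAG_SHRINKER_FREE), in which case
 * recycling it through the pool would defeat the purpose.
 */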
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page)
{
        struct ion_page_pool *pool;
        unsigned int order = compound_order(page);
        bool cached = ion_buffer_cached(buffer);

        /* shrinker reclaim: give the page back to the system */
        if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
                __free_pages(page, order);
                return;
        }

        if (!cached)
                pool = heap->uncached_pools[order_to_index(order)];
        else
                pool = heap->cached_pools[order_to_index(order)];

        ion_page_pool_free(pool, page);
}

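/*
 * Pick the largest order that fits in @size, does not exceed @max_order,
 * and can actually be allocated. Repeated calls decompose a request
 * greedily: with 4 KiB pages, a 100-page buffer becomes six order-4
 * (16-page) blocks plus four order-0 pages, order 8 (256 pages) being
 * too large to fit.
 */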
static struct page *alloc_largest_available(struct ion_system_heap *heap,
                                            struct ion_buffer *buffer,
                                            unsigned long size,
                                            unsigned int max_order)
{
        struct page *page;
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                return page;
        }

        return NULL;
}

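/*
 * Assemble a buffer from pooled pages. Requests larger than half of
 * total RAM are rejected up front. The buffer is built greedily from
 * the largest blocks that still fit; max_order only ratchets downward,
 * so blocks land in the scatterlist in non-increasing order sizes, one
 * sg entry per block.
 */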
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i = 0;
        unsigned long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];

        if (size / PAGE_SIZE > totalram_pages / 2)
                return -ENOMEM;

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                page = alloc_largest_available(sys_heap, buffer, size_remaining,
                                               max_order);
                if (!page)
                        goto free_pages;
                list_add_tail(&page->lru, &pages);
                size_remaining -= PAGE_SIZE << compound_order(page);
                max_order = compound_order(page);
                i++;
        }
        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                goto free_pages;

        if (sg_alloc_table(table, i, GFP_KERNEL))
                goto free_table;

        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        buffer->sg_table = table;
        return 0;

free_table:
        kfree(table);
free_pages:
        list_for_each_entry_safe(page, tmp_page, &pages, lru)
                free_buffer_page(sys_heap, buffer, page);
        return -ENOMEM;
}

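/*
 * Unless the shrinker is reclaiming this buffer (in which case the pages
 * bypass the pools entirely), zero it first so pages recycled through
 * the pools never hand stale data to the next allocation.
 */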
static void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_system_heap *sys_heap = container_of(buffer->heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->sg_table;
        struct scatterlist *sg;
        int i;

        /* zero the buffer before returning its pages to the page pools */
        if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
                ion_heap_buffer_zero(buffer);

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg));
        sg_free_table(table);
        kfree(table);
}

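/*
 * Shrinker callback for the page pools. A nr_to_scan of zero is a query:
 * report how many pages could be freed without freeing any. Otherwise
 * drain the uncached and cached pools in turn, stopping as soon as
 * nr_to_scan pages have been released.
 */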
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
                                  int nr_to_scan)
{
        struct ion_page_pool *uncached_pool;
        struct ion_page_pool *cached_pool;
        struct ion_system_heap *sys_heap;
        int nr_total = 0;
        int i, nr_freed;
        int only_scan = 0;

        sys_heap = container_of(heap, struct ion_system_heap, heap);

        if (!nr_to_scan)
                only_scan = 1;

        for (i = 0; i < NUM_ORDERS; i++) {
                uncached_pool = sys_heap->uncached_pools[i];
                cached_pool = sys_heap->cached_pools[i];

                if (only_scan) {
                        nr_total += ion_page_pool_shrink(uncached_pool,
                                                         gfp_mask,
                                                         nr_to_scan);

                        nr_total += ion_page_pool_shrink(cached_pool,
                                                         gfp_mask,
                                                         nr_to_scan);
                } else {
                        nr_freed = ion_page_pool_shrink(uncached_pool,
                                                        gfp_mask,
                                                        nr_to_scan);
                        nr_to_scan -= nr_freed;
                        nr_total += nr_freed;
                        if (nr_to_scan <= 0)
                                break;
                        nr_freed = ion_page_pool_shrink(cached_pool,
                                                        gfp_mask,
                                                        nr_to_scan);
                        nr_to_scan -= nr_freed;
                        nr_total += nr_freed;
                        if (nr_to_scan <= 0)
                                break;
                }
        }
        return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
        .shrink = ion_system_heap_shrink,
};

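/* Dump each pool's per-order page counts via the heap's debug_show hook. */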
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                      void *unused)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;
        struct ion_page_pool *pool;

        for (i = 0; i < NUM_ORDERS; i++) {
                pool = sys_heap->uncached_pools[i];

                seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
                           pool->high_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
                           pool->low_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->low_count);
        }

        for (i = 0; i < NUM_ORDERS; i++) {
                pool = sys_heap->cached_pools[i];

                seq_printf(s, "%d order %u highmem pages cached %lu total\n",
                           pool->high_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
                           pool->low_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->low_count);
        }
        return 0;
}

static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (pools[i])
                        ion_page_pool_destroy(pools[i]);
}

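/*
 * Create one pool per entry in orders[]. Orders above 4 get the
 * fail-fast high-order GFP flags; lower orders keep the default flags
 * so small allocations can still reclaim. Any pools already created are
 * torn down on failure.
 */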
static int ion_system_heap_create_pools(struct ion_page_pool **pools,
                                        bool cached)
{
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                struct ion_page_pool *pool;
                /*
                 * Re-initialize per order: orders[] is descending, so a
                 * gfp_t carried across iterations would leave the
                 * fail-fast flags set for the low orders as well.
                 */
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;

                pool = ion_page_pool_create(gfp_flags, orders[i], cached);
                if (!pool)
                        goto err_create_pool;
                pools[i] = pool;
        }
        return 0;

err_create_pool:
        ion_system_heap_destroy_pools(pools);
        return -ENOMEM;
}

static struct ion_heap *__ion_system_heap_create(void)
{
        struct ion_system_heap *heap;

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

        if (ion_system_heap_create_pools(heap->uncached_pools, false))
                goto free_heap;

        if (ion_system_heap_create_pools(heap->cached_pools, true))
                goto destroy_uncached_pools;

        heap->heap.debug_show = ion_system_heap_debug_show;
        return &heap->heap;

destroy_uncached_pools:
        ion_system_heap_destroy_pools(heap->uncached_pools);

free_heap:
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

static int ion_system_heap_create(void)
{
        struct ion_heap *heap;

        heap = __ion_system_heap_create();
        if (IS_ERR(heap))
                return PTR_ERR(heap);
        heap->name = "ion_system_heap";

        ion_device_add_heap(heap);
        return 0;
}
device_initcall(ion_system_heap_create);

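/*
 * Allocate a physically contiguous buffer: grab a single block of
 * 2^order pages, split_page() it into order-0 pages, and immediately
 * free the tail pages beyond the page-aligned length. For example, a
 * 3-page request rounds up to order 2 (4 pages) and the fourth page is
 * handed back on the spot.
 */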
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long flags)
{
        int order = get_order(len);
        struct page *page;
        struct sg_table *table;
        unsigned long i;
        int ret;

        page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
        if (!page)
                return -ENOMEM;

        split_page(page, order);

        len = PAGE_ALIGN(len);
        for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
                __free_page(page + i);

        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table) {
                ret = -ENOMEM;
                goto free_pages;
        }

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto free_table;

        sg_set_page(table->sgl, page, len, 0);

        buffer->sg_table = table;

        return 0;

free_table:
        kfree(table);
free_pages:
        for (i = 0; i < len >> PAGE_SHIFT; i++)
                __free_page(page + i);

        return ret;
}

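/*
 * The block was split into order-0 pages at allocation time, so free
 * each constituent page individually.
 */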
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        struct page *page = sg_page(table->sgl);
        unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
        unsigned long i;

        for (i = 0; i < pages; i++)
                __free_page(page + i);
        sg_free_table(table);
        kfree(table);
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
};

static struct ion_heap *__ion_system_contig_heap_create(void)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        heap->name = "ion_system_contig_heap";
        return heap;
}

static int ion_system_contig_heap_create(void)
{
        struct ion_heap *heap;

        heap = __ion_system_contig_heap_create();
        if (IS_ERR(heap))
                return PTR_ERR(heap);

        ion_device_add_heap(heap);
        return 0;
}
device_initcall(ion_system_contig_heap_create);