linux/drivers/gpu/drm/ttm/ttm_memory.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

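/*
 * struct ttm_mem_zone - accounting state for one memory zone.
 *
 * @kobj:       sysfs anchor for the per-zone attributes below.
 * @glob:       backpointer to the owning ttm_mem_global.
 * @name:       zone name ("kernel", "highmem" or "dma32").
 * @zone_mem:   total amount of memory in this zone, in bytes.
 * @emer_mem:   hard allocation limit, approachable only with CAP_SYS_ADMIN.
 * @max_mem:    soft allocation limit for ordinary allocations.
 * @swap_limit: usage level at which the shrink worker starts swapping.
 * @used_mem:   memory currently accounted against this zone.
 *
 * All limits are in bytes and protected by glob->lock.
 */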
struct ttm_mem_zone {
        struct kobject kobj;
        struct ttm_mem_global *glob;
        const char *name;
        uint64_t zone_mem;
        uint64_t emer_mem;
        uint64_t max_mem;
        uint64_t swap_limit;
        uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
        .name = "zone_memory",
        .mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
        .name = "emergency_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
        .name = "available_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
        .name = "swap_limit",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
        .name = "used_memory",
        .mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);

        pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
                zone->name, (unsigned long long)zone->used_mem >> 10);
        kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
                                 struct attribute *attr,
                                 char *buffer)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        uint64_t val = 0;

        spin_lock(&zone->glob->lock);
        if (attr == &ttm_mem_sys)
                val = zone->zone_mem;
        else if (attr == &ttm_mem_emer)
                val = zone->emer_mem;
        else if (attr == &ttm_mem_max)
                val = zone->max_mem;
        else if (attr == &ttm_mem_swap)
                val = zone->swap_limit;
        else if (attr == &ttm_mem_used)
                val = zone->used_mem;
        spin_unlock(&zone->glob->lock);

        return snprintf(buffer, PAGE_SIZE, "%llu\n",
                        (unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
                                  struct attribute *attr,
                                  const char *buffer,
                                  size_t size)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        int chars;
        unsigned long val;
        uint64_t val64;

        chars = sscanf(buffer, "%lu", &val);
        if (chars == 0)
                return size;

        val64 = val;
        val64 <<= 10;

        spin_lock(&zone->glob->lock);
        if (val64 > zone->zone_mem)
                val64 = zone->zone_mem;
        if (attr == &ttm_mem_emer) {
                zone->emer_mem = val64;
                if (zone->max_mem > val64)
                        zone->max_mem = val64;
        } else if (attr == &ttm_mem_max) {
                zone->max_mem = val64;
                if (zone->emer_mem < val64)
                        zone->emer_mem = val64;
        } else if (attr == &ttm_mem_swap)
                zone->swap_limit = val64;
        spin_unlock(&zone->glob->lock);

        ttm_check_swapping(zone->glob);

        return size;
}
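
/*
 * All zone values are exposed in kiB. For example (path abbreviated; the
 * exact location depends on where ttm_get_kobj() parents the
 * "memory_accounting" kobject created below):
 *
 *   cat /sys/.../memory_accounting/kernel/used_memory
 *   echo 262144 > /sys/.../memory_accounting/kernel/swap_limit
 *
 * The second command lowers the kernel zone swap threshold to 256 MiB;
 * writes are clamped to the zone size and immediately re-check whether
 * swapping should start.
 */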

static struct attribute *ttm_mem_zone_attrs[] = {
        &ttm_mem_sys,
        &ttm_mem_emer,
        &ttm_mem_max,
        &ttm_mem_swap,
        &ttm_mem_used,
        NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
        .show = &ttm_mem_zone_show,
        .store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
        .release = &ttm_mem_zone_kobj_release,
        .sysfs_ops = &ttm_mem_zone_ops,
        .default_attrs = ttm_mem_zone_attrs,
};

static struct attribute ttm_mem_global_lower_mem_limit = {
        .name = "lower_mem_limit",
        .mode = S_IRUGO | S_IWUSR
};

static ssize_t ttm_mem_global_show(struct kobject *kobj,
                                   struct attribute *attr,
                                   char *buffer)
{
        struct ttm_mem_global *glob =
                container_of(kobj, struct ttm_mem_global, kobj);
        uint64_t val = 0;

        spin_lock(&glob->lock);
        val = glob->lower_mem_limit;
        spin_unlock(&glob->lock);
        /* convert from number of pages to KB */
        val <<= (PAGE_SHIFT - 10);
        return snprintf(buffer, PAGE_SIZE, "%llu\n",
                        (unsigned long long) val);
}

static ssize_t ttm_mem_global_store(struct kobject *kobj,
                                    struct attribute *attr,
                                    const char *buffer,
                                    size_t size)
{
        int chars;
        uint64_t val64;
        unsigned long val;
        struct ttm_mem_global *glob =
                container_of(kobj, struct ttm_mem_global, kobj);

        chars = sscanf(buffer, "%lu", &val);
        if (chars == 0)
                return size;

        val64 = val;
        /* convert from KB to number of pages */
        val64 >>= (PAGE_SHIFT - 10);

        spin_lock(&glob->lock);
        glob->lower_mem_limit = val64;
        spin_unlock(&glob->lock);

        return size;
}
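
/*
 * Note that lower_mem_limit is stored internally in pages but exposed in
 * kiB, so a value written through sysfs is truncated to a whole number of
 * pages and may read back slightly smaller than what was written.
 */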

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_global *glob =
                container_of(kobj, struct ttm_mem_global, kobj);

        kfree(glob);
}

static struct attribute *ttm_mem_global_attrs[] = {
        &ttm_mem_global_lower_mem_limit,
        NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
        .show = &ttm_mem_global_show,
        .store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
        .release = &ttm_mem_global_kobj_release,
        .sysfs_ops = &ttm_mem_global_ops,
        .default_attrs = ttm_mem_global_attrs,
};

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
                                        bool from_wq, uint64_t extra)
{
        unsigned int i;
        struct ttm_mem_zone *zone;
        uint64_t target;

        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];

                if (from_wq)
                        target = zone->swap_limit;
                else if (capable(CAP_SYS_ADMIN))
                        target = zone->emer_mem;
                else
                        target = zone->max_mem;

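                /*
                 * If the pending allocation alone exceeds the target,
                 * drop the target to zero so that any used memory at all
                 * keeps the swapper running.
                 */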
                target = (extra > target) ? 0ULL : target;

                if (zone->used_mem > target)
                        return true;
        }
        return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
                        uint64_t extra, struct ttm_operation_ctx *ctx)
{
        int ret;

        spin_lock(&glob->lock);

        while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                spin_unlock(&glob->lock);
                ret = ttm_bo_swapout(glob->bo_glob, ctx);
                spin_lock(&glob->lock);
                if (unlikely(ret != 0))
                        break;
        }

        spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_mem_global *glob =
            container_of(work, struct ttm_mem_global, work);

        ttm_shrink(glob, true, 0ULL, &ctx);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
                                    const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
        int ret;

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram - si->totalhigh;
        mem *= si->mem_unit;

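        /*
         * Default policy: ordinary allocations may take up to half of the
         * zone (max_mem), CAP_SYS_ADMIN may go to 75% (emer_mem), and
         * background swapping starts at 37.5% (swap_limit). All three are
         * tunable through the per-zone sysfs attributes above.
         */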
        zone->name = "kernel";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_kernel = zone;
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
                zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
                                     const struct sysinfo *si)
{
        struct ttm_mem_zone *zone;
        uint64_t mem;
        int ret;

        if (si->totalhigh == 0)
                return 0;

        zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        zone->name = "highmem";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_highmem = zone;
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
                zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
                                   const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
        int ret;

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        /**
         * No special dma32 zone needed.
         */

        if (mem <= ((uint64_t) 1ULL << 32)) {
                kfree(zone);
                return 0;
        }

        /*
         * Limit max dma32 memory to 4GB for now
         * until we can figure out how big this
         * zone really is.
         */

        mem = ((uint64_t) 1ULL << 32);
        zone->name = "dma32";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_dma32 = zone;
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
                zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#endif

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
        struct sysinfo si;
        int ret;
        int i;
        struct ttm_mem_zone *zone;

        spin_lock_init(&glob->lock);
        glob->swap_queue = create_singlethread_workqueue("ttm_swap");
        INIT_WORK(&glob->work, ttm_shrink_work);
        ret = kobject_init_and_add(
                &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(),
                "memory_accounting");
        if (unlikely(ret != 0)) {
                kobject_put(&glob->kobj);
                return ret;
        }

        si_meminfo(&si);

        /* Default to 0 to keep the original OOM behavior. */
        glob->lower_mem_limit = 0;

        ret = ttm_mem_init_kernel_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#ifdef CONFIG_HIGHMEM
        ret = ttm_mem_init_highmem_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#else
        ret = ttm_mem_init_dma32_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#endif
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
                        zone->name, (unsigned long long)zone->max_mem >> 10);
        }
        ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        return 0;
out_no_zone:
        ttm_mem_global_release(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
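
/*
 * A minimal sketch of how a driver of this era typically obtains the
 * shared accounting object, via the drm_global infrastructure (the
 * my_* callback names are hypothetical):
 *
 *      static int my_mem_global_init(struct drm_global_reference *ref)
 *      {
 *              return ttm_mem_global_init(ref->object);
 *      }
 *
 *      struct drm_global_reference ref = {
 *              .global_type = DRM_GLOBAL_TTM_MEM,
 *              .size = sizeof(struct ttm_mem_global),
 *              .init = &my_mem_global_init,
 *              .release = &my_mem_global_release,
 *      };
 *      ret = drm_global_item_ref(&ref);
 *
 * drm_global_item_ref() refcounts the object, so multiple devices end up
 * sharing a single ttm_mem_global.
 */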

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        /* let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
        ttm_dma_page_alloc_fini();

        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
        glob->swap_queue = NULL;
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
        }
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
        bool needs_swapping = false;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (zone->used_mem > zone->swap_limit) {
                        needs_swapping = true;
                        break;
                }
        }

        spin_unlock(&glob->lock);

        if (unlikely(needs_swapping))
                (void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t amount)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;
                zone->used_mem -= amount;
        }
        spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
                         uint64_t amount)
{
        return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * Check whether the system would drop below its lower memory limit if
 * this allocation went ahead.
 *
 * Sums the free swap space and the available system memory, subtracts
 * the size of the pending allocation, and returns true (deny the
 * allocation) if the result is below glob->lower_mem_limit. Allocations
 * with TTM_OPT_FLAG_FORCE_ALLOC set bypass the check.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
                        uint64_t num_pages,
                        struct ttm_operation_ctx *ctx)
{
        int64_t available;

        if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
                return false;

        available = get_nr_swap_pages() + si_mem_available();
        available -= num_pages;
        if (available < glob->lower_mem_limit)
                return true;

        return false;
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
                                  struct ttm_mem_zone *single_zone,
                                  uint64_t amount, bool reserve)
{
        uint64_t limit;
        int ret = -ENOMEM;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;

                limit = (capable(CAP_SYS_ADMIN)) ?
                        zone->emer_mem : zone->max_mem;

                if (zone->used_mem > limit)
                        goto out_unlock;
        }

        if (reserve) {
                for (i = 0; i < glob->num_zones; ++i) {
                        zone = glob->zones[i];
                        if (single_zone && zone != single_zone)
                                continue;
                        zone->used_mem += amount;
                }
        }

        ret = 0;
out_unlock:
        spin_unlock(&glob->lock);
        ttm_check_swapping(glob);

        return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t memory,
                                     struct ttm_operation_ctx *ctx)
{
        int count = TTM_MEMORY_ALLOC_RETRIES;

        while (unlikely(ttm_mem_global_reserve(glob,
                                               single_zone,
                                               memory, true)
                        != 0)) {
                if (ctx->no_wait_gpu)
                        return -ENOMEM;
                if (unlikely(count-- == 0))
                        return -ENOMEM;
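                /*
                 * Try to swap out ~125% of the request plus a small
                 * slack before retrying the reservation.
                 */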
                ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
        }

        return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
                         struct ttm_operation_ctx *ctx)
{
        /**
         * Normal allocations of kernel memory are registered in
         * all zones.
         */

        return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
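
/*
 * A minimal caller-side sketch (identifiers other than the ttm_* calls
 * are hypothetical). Every successful ttm_mem_global_alloc() must be
 * balanced by a ttm_mem_global_free() of the same size:
 *
 *      struct ttm_operation_ctx ctx = { .interruptible = true,
 *                                       .no_wait_gpu = false };
 *      uint64_t acc_size = ttm_round_pot(obj_size);
 *
 *      ret = ttm_mem_global_alloc(mem_glob, acc_size, &ctx);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ...
 *      ttm_mem_global_free(mem_glob, acc_size);
 */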

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
                              struct page *page, uint64_t size,
                              struct ttm_operation_ctx *ctx)
{
        struct ttm_mem_zone *zone = NULL;

        /*
         * Page allocations may be registered in a single zone only: the
         * highmem zone for highmem pages, or the kernel zone for pages
         * above the dma32 boundary when a dma32 zone exists.
         */

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
                              uint64_t size)
{
        struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        ttm_mem_global_free_zone(glob, zone, size);
}

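/*
 * Round an accounting size up: sizes at or below PAGE_SIZE go to the
 * next power of two (minimum 4 bytes), larger sizes to a whole number
 * of pages. A size that is already a power of two is returned unchanged.
 */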
size_t ttm_round_pot(size_t size)
{
        if ((size & (size - 1)) == 0)
                return size;
        else if (size > PAGE_SIZE)
                return PAGE_ALIGN(size);
        else {
                size_t tmp_size = 4;

                while (tmp_size < size)
                        tmp_size <<= 1;

                return tmp_size;
        }
}
EXPORT_SYMBOL(ttm_round_pot);

uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
        return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);