linux/drivers/gpu/drm/ttm/ttm_memory.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

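/**
 * struct ttm_mem_zone - accounting zone for one class of system memory.
 *
 * @kobj: Sysfs object; each zone appears under .../memory_accounting/<name>.
 * @glob: Pointer back to the owning struct ttm_mem_global.
 * @name: Zone name: "kernel", "highmem" or "dma32".
 * @zone_mem: Total amount of memory covered by this zone, in bytes.
 * @emer_mem: Emergency limit; only CAP_SYS_ADMIN callers may allocate
 * up to this amount.
 * @max_mem: Soft limit for ordinary allocations.
 * @swap_limit: Usage threshold above which the swap worker is queued.
 * @used_mem: Memory currently accounted against this zone.
 */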
struct ttm_mem_zone {
        struct kobject kobj;
        struct ttm_mem_global *glob;
        const char *name;
        uint64_t zone_mem;
        uint64_t emer_mem;
        uint64_t max_mem;
        uint64_t swap_limit;
        uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
        .name = "zone_memory",
        .mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
        .name = "emergency_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
        .name = "available_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
        .name = "swap_limit",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
        .name = "used_memory",
        .mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);

        printk(KERN_INFO TTM_PFX
               "Zone %7s: Used memory at exit: %llu kiB.\n",
               zone->name, (unsigned long long) zone->used_mem >> 10);
        kfree(zone);
}

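/**
 * ttm_mem_zone_show - sysfs show callback for zone attributes.
 *
 * Samples the selected counter under the global spinlock and reports
 * it in kiB, the same unit the store callback accepts.
 */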
static ssize_t ttm_mem_zone_show(struct kobject *kobj,
                                 struct attribute *attr,
                                 char *buffer)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        uint64_t val = 0;

        spin_lock(&zone->glob->lock);
        if (attr == &ttm_mem_sys)
                val = zone->zone_mem;
        else if (attr == &ttm_mem_emer)
                val = zone->emer_mem;
        else if (attr == &ttm_mem_max)
                val = zone->max_mem;
        else if (attr == &ttm_mem_swap)
                val = zone->swap_limit;
        else if (attr == &ttm_mem_used)
                val = zone->used_mem;
        spin_unlock(&zone->glob->lock);

        return snprintf(buffer, PAGE_SIZE, "%llu\n",
                        (unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

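/**
 * ttm_mem_zone_store - sysfs store callback for writable zone attributes.
 *
 * Parses a value in kiB, clamps it to the zone size and maintains the
 * invariant max_mem <= emer_mem before re-checking the swap threshold.
 */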
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
                                  struct attribute *attr,
                                  const char *buffer,
                                  size_t size)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        int chars;
        unsigned long val;
        uint64_t val64;

        chars = sscanf(buffer, "%lu", &val);
        if (chars == 0)
                return size;

        val64 = val;
        val64 <<= 10;

        spin_lock(&zone->glob->lock);
        if (val64 > zone->zone_mem)
                val64 = zone->zone_mem;
        if (attr == &ttm_mem_emer) {
                zone->emer_mem = val64;
                if (zone->max_mem > val64)
                        zone->max_mem = val64;
        } else if (attr == &ttm_mem_max) {
                zone->max_mem = val64;
                if (zone->emer_mem < val64)
                        zone->emer_mem = val64;
        } else if (attr == &ttm_mem_swap)
                zone->swap_limit = val64;
        spin_unlock(&zone->glob->lock);

        ttm_check_swapping(zone->glob);

        return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
        &ttm_mem_sys,
        &ttm_mem_emer,
        &ttm_mem_max,
        &ttm_mem_swap,
        &ttm_mem_used,
        NULL
};

static struct sysfs_ops ttm_mem_zone_ops = {
        .show = &ttm_mem_zone_show,
        .store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
        .release = &ttm_mem_zone_kobj_release,
        .sysfs_ops = &ttm_mem_zone_ops,
        .default_attrs = ttm_mem_zone_attrs,
};

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_global *glob =
                container_of(kobj, struct ttm_mem_global, kobj);

        kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
        .release = &ttm_mem_global_kobj_release,
};

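/**
 * ttm_zones_above_swap_target - check whether any zone exceeds its target.
 *
 * The target is the swap limit when called from the workqueue, the
 * emergency limit for CAP_SYS_ADMIN callers and the soft maximum
 * otherwise. When the pending allocation @extra alone exceeds the
 * target, the target is treated as zero so that swapping proceeds.
 */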
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
                                        bool from_wq, uint64_t extra)
{
        unsigned int i;
        struct ttm_mem_zone *zone;
        uint64_t target;

        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];

                if (from_wq)
                        target = zone->swap_limit;
                else if (capable(CAP_SYS_ADMIN))
                        target = zone->emer_mem;
                else
                        target = zone->max_mem;

                target = (extra > target) ? 0ULL : target;

                if (zone->used_mem > target)
                        return true;
        }
        return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
                       uint64_t extra)
{
        int ret;
        struct ttm_mem_shrink *shrink;

        spin_lock(&glob->lock);
        if (glob->shrink == NULL)
                goto out;

        while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                shrink = glob->shrink;
                spin_unlock(&glob->lock);
                ret = shrink->do_shrink(shrink);
                spin_lock(&glob->lock);
                if (unlikely(ret != 0))
                        goto out;
        }
out:
        spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
        struct ttm_mem_global *glob =
            container_of(work, struct ttm_mem_global, work);

        ttm_shrink(glob, true, 0ULL);
}

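/**
 * ttm_mem_init_kernel_zone - set up accounting for lowmem.
 *
 * The zone spans totalram - totalhigh. Default limits: max_mem is half
 * the zone, emer_mem three quarters, and the swap limit three eighths
 * (max_mem minus one eighth of the zone).
 */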
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
                                    const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
        int ret;

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram - si->totalhigh;
        mem *= si->mem_unit;

        zone->name = "kernel";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_kernel = zone;
        kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
        ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
                                     const struct sysinfo *si)
{
        struct ttm_mem_zone *zone;
        uint64_t mem;
        int ret;

        /*
         * Check for highmem before allocating the zone struct,
         * so it isn't leaked when there is no highmem.
         */
        if (si->totalhigh == 0)
                return 0;

        zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        zone->name = "highmem";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_highmem = zone;
        kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
        ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
                                   const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
        int ret;

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        /*
         * No special dma32 zone needed; free the zone struct again
         * rather than leaking it.
         */
        if (mem <= ((uint64_t) 1ULL << 32)) {
                kfree(zone);
                return 0;
        }

        /*
         * Limit max dma32 memory to 4GB for now
         * until we can figure out how big this
         * zone really is.
         */
        mem = ((uint64_t) 1ULL << 32);
        zone->name = "dma32";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_dma32 = zone;
        kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
        ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#endif

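/**
 * ttm_mem_global_init - set up the accounting zones and their sysfs nodes.
 *
 * Creates the "ttm_swap" workqueue, adds the "memory_accounting" kobject
 * and initializes the kernel zone plus either the highmem zone
 * (CONFIG_HIGHMEM) or the dma32 zone.
 */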
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
        struct sysinfo si;
        int ret;
        int i;
        struct ttm_mem_zone *zone;

        spin_lock_init(&glob->lock);
        glob->swap_queue = create_singlethread_workqueue("ttm_swap");
        INIT_WORK(&glob->work, ttm_shrink_work);
        init_waitqueue_head(&glob->queue);
        kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
        ret = kobject_add(&glob->kobj,
                          ttm_get_kobj(),
                          "memory_accounting");
        if (unlikely(ret != 0)) {
                kobject_put(&glob->kobj);
                return ret;
        }

        si_meminfo(&si);

        ret = ttm_mem_init_kernel_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#ifdef CONFIG_HIGHMEM
        ret = ttm_mem_init_highmem_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#else
        ret = ttm_mem_init_dma32_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#endif
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                printk(KERN_INFO TTM_PFX
                       "Zone %7s: Available graphics memory: %llu kiB.\n",
                       zone->name, (unsigned long long) zone->max_mem >> 10);
        }
        return 0;
out_no_zone:
        ttm_mem_global_release(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

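/**
 * ttm_mem_global_release - tear down zones and sysfs state.
 *
 * Flushes and destroys the swap workqueue first, so no shrink work can
 * run against zones that are about to disappear, then drops the zone
 * and global kobjects; their release callbacks free the memory.
 */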
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
        glob->swap_queue = NULL;
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
        }
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

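/**
 * ttm_check_swapping - queue the shrink worker if any zone has gone
 * above its swap limit. Called after usage or limits change.
 */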
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
        bool needs_swapping = false;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (zone->used_mem > zone->swap_limit) {
                        needs_swapping = true;
                        break;
                }
        }
        spin_unlock(&glob->lock);

        if (unlikely(needs_swapping))
                (void)queue_work(glob->swap_queue, &glob->work);
}

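/**
 * ttm_mem_global_free_zone - subtract @amount bytes from the usage of
 * @single_zone, or from every zone when @single_zone is NULL.
 */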
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t amount)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;
                zone->used_mem -= amount;
        }
        spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
                         uint64_t amount)
{
        return ttm_mem_global_free_zone(glob, NULL, amount);
}

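/**
 * ttm_mem_global_reserve - single attempt to account @amount bytes.
 *
 * Returns -ENOMEM if any affected zone is already above its limit
 * (the emergency limit for CAP_SYS_ADMIN callers, the soft maximum
 * otherwise). If @reserve is true and all zones pass, @amount is added
 * to used_mem of each affected zone within the same critical section.
 */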
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
                                  struct ttm_mem_zone *single_zone,
                                  uint64_t amount, bool reserve)
{
        uint64_t limit;
        int ret = -ENOMEM;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;

                limit = (capable(CAP_SYS_ADMIN)) ?
                        zone->emer_mem : zone->max_mem;

                if (zone->used_mem > limit)
                        goto out_unlock;
        }

        if (reserve) {
                for (i = 0; i < glob->num_zones; ++i) {
                        zone = glob->zones[i];
                        if (single_zone && zone != single_zone)
                                continue;
                        zone->used_mem += amount;
                }
        }

        ret = 0;
out_unlock:
        spin_unlock(&glob->lock);
        ttm_check_swapping(glob);

        return ret;
}

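/**
 * ttm_mem_global_alloc_zone - account an allocation, shrinking if needed.
 *
 * Retries a failed reservation up to TTM_MEMORY_ALLOC_RETRIES times,
 * asking the shrink callback between attempts to free the requested
 * amount plus 25% headroom (memory + (memory >> 2) + 16 bytes).
 */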
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t memory,
                                     bool no_wait, bool interruptible)
{
        int count = TTM_MEMORY_ALLOC_RETRIES;

        while (unlikely(ttm_mem_global_reserve(glob,
                                               single_zone,
                                               memory, true)
                        != 0)) {
                if (no_wait)
                        return -ENOMEM;
                if (unlikely(count-- == 0))
                        return -ENOMEM;
                ttm_shrink(glob, false, memory + (memory >> 2) + 16);
        }

        return 0;
}

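/**
 * ttm_mem_global_alloc - account @memory bytes against all zones.
 *
 * Illustrative call sequence from a driver; "mem_glob" and "acc_size"
 * are hypothetical names and error handling is abbreviated:
 *
 *      ret = ttm_mem_global_alloc(mem_glob, acc_size, false, true);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ...
 *      ttm_mem_global_free(mem_glob, acc_size);
 */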
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
                         bool no_wait, bool interruptible)
{
        /**
         * Normal allocations of kernel memory are registered in
         * all zones.
         */

        return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
                                         interruptible);
}

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
                              struct page *page,
                              bool no_wait, bool interruptible)
{
        struct ttm_mem_zone *zone = NULL;

        /**
         * Page allocations may be registered in a single zone
         * only if highmem or !dma32.
         */

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
                                         interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
        struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

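/**
 * ttm_round_pot - round an allocation size for accounting purposes.
 *
 * Sizes up to PAGE_SIZE are rounded up to the next power of two
 * (minimum four bytes); larger sizes are rounded up to a whole number
 * of pages.
 */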
size_t ttm_round_pot(size_t size)
{
        if ((size & (size - 1)) == 0)
                return size;
        else if (size > PAGE_SIZE)
                return PAGE_ALIGN(size);
        else {
                size_t tmp_size = 4;

                while (tmp_size < size)
                        tmp_size <<= 1;

                return tmp_size;
        }
}