linux/drivers/gpu/drm/ttm/ttm_tt.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
        ttm->dma_address = drm_calloc_large(ttm->num_pages,
                                            sizeof(*ttm->dma_address));

        /* Callers only check ttm->pages, so make the allocation
         * all-or-nothing to avoid leaking or dereferencing a
         * half-built directory. */
        if (!ttm->pages || !ttm->dma_address) {
                drm_free_large(ttm->pages);
                ttm->pages = NULL;
                drm_free_large(ttm->dma_address);
                ttm->dma_address = NULL;
        }
}

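/**
 * Frees the storage allocated by ttm_tt_alloc_page_directory() and
 * clears the pointers so a repeated free is harmless.
 */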
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
        drm_free_large(ttm->dma_address);
        ttm->dma_address = NULL;
}

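/**
 * Releases pages pinned with get_user_pages(). Pages that may have been
 * written through the ttm are marked dirty before being unpinned, except
 * for the shared dummy read page, which must never see a write.
 */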
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
        int write;
        int dirty;
        struct page *page;
        int i;
        struct ttm_backend *be = ttm->be;

        BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
        write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
        dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

        if (be)
                be->func->clear(be);

        for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (page == NULL)
                        continue;

                if (page == ttm->dummy_read_page) {
                        BUG_ON(write);
                        continue;
                }

                if (write && dirty && !PageReserved(page))
                        set_page_dirty_lock(page);

                ttm->pages[i] = NULL;
                ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
                put_page(page);
        }
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
}

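/**
 * Allocates (if necessary) and returns the page at @index, drawing from
 * the ttm page pool and charging it against the global memory accounting.
 * Highmem pages are filled in from the top of the page directory and
 * lowmem pages from the bottom, which is what first_himem_page and
 * last_lomem_page track.
 */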
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
        struct page *p;
        struct list_head h;
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        int ret;

        while (NULL == (p = ttm->pages[index])) {

                INIT_LIST_HEAD(&h);

                ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
                                    &ttm->dma_address[index]);

                if (ret != 0)
                        return NULL;

                p = list_first_entry(&h, struct page, lru);

                ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
                if (unlikely(ret != 0))
                        goto out_err;

                if (PageHighMem(p))
                        ttm->pages[--ttm->first_himem_page] = p;
                else
                        ttm->pages[++ttm->last_lomem_page] = p;
        }
        return p;
out_err:
        put_page(p);
        return NULL;
}

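/**
 * Like __ttm_tt_get_page(), but brings the ttm back in from swap first
 * if it has been swapped out.
 */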
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
        int ret;

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0))
                        return NULL;
        }
        return __ttm_tt_get_page(ttm, index);
}

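/**
 * ttm_tt_populate - allocate backing pages for a ttm
 *
 * @ttm: the ttm to populate
 *
 * Swaps the ttm back in if needed, allocates all backing pages and hands
 * them to the backend. On success the ttm is left in the tt_unbound
 * state, ready for ttm_tt_bind().
 */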
int ttm_tt_populate(struct ttm_tt *ttm)
{
        struct page *page;
        unsigned long i;
        struct ttm_backend *be;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0))
                        return ret;
        }

        be = ttm->be;

        for (i = 0; i < ttm->num_pages; ++i) {
                page = __ttm_tt_get_page(ttm, i);
                if (!page)
                        return -ENOMEM;
        }

        be->func->populate(be, ttm->num_pages, ttm->pages,
                           ttm->dummy_read_page, ttm->dma_address);
        ttm->state = tt_unbound;
        return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

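/*
 * On x86 the kernel linear map must be kept consistent with the
 * requested caching state; a page is transitioned through write-back
 * first so that its current memtype is released before a new one is
 * set. On other architectures this is a no-op.
 */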
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        int ret = 0;

        if (PageHighMem(p))
                return 0;

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */

                ret = set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                ret = set_memory_wc((unsigned long) page_address(p), 1);
        else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);

        return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        /* Roll back the pages that were already transitioned. */
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

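/**
 * ttm_tt_set_placement_caching - apply the caching state implied by
 * placement flags
 *
 * @ttm: the ttm to modify
 * @placement: TTM_PL_FLAG_* placement flags
 *
 * Maps TTM_PL_FLAG_WC and TTM_PL_FLAG_UNCACHED to the corresponding
 * caching states, defaulting to cached, and applies the result with
 * ttm_tt_set_caching().
 */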
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

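/**
 * Returns all allocated pages to the ttm page pool, warning about pages
 * that still hold extra references, and resets the ttm to the
 * unpopulated state.
 */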
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
        int i;
        unsigned count = 0;
        struct list_head h;
        struct page *cur_page;
        struct ttm_backend *be = ttm->be;

        INIT_LIST_HEAD(&h);

        if (be)
                be->func->clear(be);
        for (i = 0; i < ttm->num_pages; ++i) {

                cur_page = ttm->pages[i];
                ttm->pages[i] = NULL;
                if (cur_page) {
                        if (page_count(cur_page) != 1)
                                printk(KERN_ERR TTM_PFX
                                       "Erroneous page count. "
                                       "Leaking pages.\n");
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 cur_page);
                        list_add(&cur_page->lru, &h);
                        count++;
                }
        }
        ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
                      ttm->dma_address);
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
}

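/**
 * ttm_tt_destroy - tear down a ttm
 *
 * @ttm: the ttm to destroy; NULL is silently ignored
 *
 * Destroys the backend, releases user or pool pages as appropriate,
 * frees the page directory and drops any non-persistent swap storage.
 */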
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        struct ttm_backend *be;

        if (unlikely(ttm == NULL))
                return;

        be = ttm->be;
        if (likely(be != NULL)) {
                be->func->destroy(be);
                ttm->be = NULL;
        }

        if (likely(ttm->pages != NULL)) {
                if (ttm->page_flags & TTM_PAGE_FLAG_USER)
                        ttm_tt_free_user_pages(ttm);
                else
                        ttm_tt_free_alloced_pages(ttm);

                ttm_tt_free_page_directory(ttm);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        kfree(ttm);
}

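/**
 * ttm_tt_set_user - populate a user ttm by pinning user-space pages
 *
 * @ttm: the ttm to populate
 * @tsk: the task owning the address space
 * @start: first user-space address
 * @num_pages: number of pages; must match ttm->num_pages
 *
 * Accounts the pages as lowmem and pins them with get_user_pages().
 * For writable ttms, failure to pin every page is fatal and any pinned
 * pages are released again.
 */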
int ttm_tt_set_user(struct ttm_tt *ttm,
                    struct task_struct *tsk,
                    unsigned long start, unsigned long num_pages)
{
        struct mm_struct *mm = tsk->mm;
        int ret;
        int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

        BUG_ON(num_pages != ttm->num_pages);
        BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

        /*
         * Account user pages as lowmem pages for now.
         */
        ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(tsk, mm, start, num_pages,
                             write, 0, ttm->pages, NULL);
        up_read(&mm->mmap_sem);

        if (ret != num_pages && write) {
                ttm_tt_free_user_pages(ttm);
                ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
                return -ENOMEM;
        }

        ttm->tsk = tsk;
        ttm->start = start;
        ttm->state = tt_unbound;

        return 0;
}

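/**
 * ttm_tt_create - allocate and initialize a ttm
 *
 * @bdev: the buffer object device
 * @size: size of the underlying buffer, rounded up to whole pages
 * @page_flags: TTM_PAGE_FLAG_* flags
 * @dummy_read_page: page to map where no real page is available
 *
 * Allocates the struct, the page directory and the driver backend.
 * Returns NULL on failure. Typically invoked by the buffer-object
 * layer when backing pages are first needed; roughly (a sketch, the
 * surrounding names are hypothetical):
 *
 *        ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 *                            0, glob->dummy_read_page);
 *        if (unlikely(ttm == NULL))
 *                return -ENOMEM;
 */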
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                             uint32_t page_flags, struct page *dummy_read_page)
{
        struct ttm_bo_driver *bo_driver = bdev->driver;
        struct ttm_tt *ttm;

        if (!bo_driver)
                return NULL;

        ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
        if (!ttm)
                return NULL;

        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;

        ttm->dummy_read_page = dummy_read_page;

        ttm_tt_alloc_page_directory(ttm);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
                return NULL;
        }
        ttm->be = bo_driver->create_ttm_backend_entry(bdev);
        if (!ttm->be) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
                return NULL;
        }
        ttm->state = tt_unpopulated;
        return ttm;
}

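/**
 * ttm_tt_unbind - unbind a ttm from the aperture
 *
 * @ttm: the ttm to unbind
 *
 * Asks the backend to unbind and moves the ttm back to the tt_unbound
 * state. A backend unbind failure is considered fatal.
 */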
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;
        struct ttm_backend *be = ttm->be;

        if (ttm->state == tt_bound) {
                ret = be->func->unbind(be);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

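/**
 * ttm_tt_bind - populate and bind a ttm to a memory region
 *
 * @ttm: the ttm to bind
 * @bo_mem: the aperture placement to bind to
 *
 * Populates the ttm if necessary and asks the backend to bind it at
 * @bo_mem. User ttms are marked dirty once bound, since the aperture
 * mapping may be written through. A buffer move path might use it
 * roughly like this (a sketch; the surrounding names are hypothetical):
 *
 *        ret = ttm_tt_bind(bo->ttm, new_mem);
 *        if (unlikely(ret != 0))
 *                return ret;
 */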
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        int ret = 0;
        struct ttm_backend *be;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        be = ttm->be;

        ret = ttm_tt_populate(ttm);
        if (ret)
                return ret;

        ret = be->func->bind(be, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        if (ttm->page_flags & TTM_PAGE_FLAG_USER)
                ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

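/**
 * Brings a swapped-out ttm back in. User ttms are simply re-pinned;
 * otherwise each page is copied back from the shmem swap object, which
 * is then released unless it is persistent.
 */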
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;
        int ret = -ENOMEM;

        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
                                      ttm->num_pages);
                if (unlikely(ret != 0))
                        return ret;

                ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
                return 0;
        }

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = shmem_read_mapping_page(swap_space, i);
                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = __ttm_tt_get_page(ttm, i);
                if (unlikely(to_page == NULL)) {
                        /* Don't leak the shmem page on the error path. */
                        page_cache_release(from_page);
                        goto out_err;
                }

                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual, KM_USER1);
                kunmap_atomic(from_virtual, KM_USER0);
                preempt_enable();
                page_cache_release(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        ttm_tt_free_alloced_pages(ttm);
        return ret;
}

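/**
 * ttm_tt_swapout - swap out a cached, unbound ttm
 *
 * @ttm: the ttm to swap out
 * @persistent_swap_storage: existing swap file to reuse, or NULL to
 * allocate a fresh shmem object
 *
 * Copies every backing page into the swap object, returns the pages to
 * the pool and flags the ttm as swapped. User ttms are just unpinned,
 * since the pages remain referenced by their vmas.
 */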
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        /*
         * For user buffers, just unpin the pages, as there should be
         * vma references.
         */
        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ttm_tt_free_user_pages(ttm);
                ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
                ttm->swap_storage = NULL;
                return 0;
        }

        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (unlikely(IS_ERR(swap_storage))) {
                        printk(KERN_ERR TTM_PFX
                               "Failed allocating swap storage.\n");
                        return PTR_ERR(swap_storage);
                }
        } else {
                swap_storage = persistent_swap_storage;
        }

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                to_page = shmem_read_mapping_page(swap_space, i);
                if (unlikely(IS_ERR(to_page))) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual, KM_USER1);
                kunmap_atomic(from_virtual, KM_USER0);
                preempt_enable();
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                page_cache_release(to_page);
        }

        ttm_tt_free_alloced_pages(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;
out_err:
        if (!persistent_swap_storage)
                fput(swap_storage);

        return ret;
}