linux/drivers/gpu/drm/ttm/ttm_tt.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "drm_cache.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}

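/*
 * Free the page directory, matching the allocation method recorded in
 * TTM_PAGE_FLAG_VMALLOC.
 */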
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}

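/*
 * Allocate a single page for a ttm, translating the ttm page flags
 * into the corresponding GFP flags.
 */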
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	gfp_t gfp_flags = GFP_USER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;
	else
		gfp_flags |= __GFP_HIGHMEM;

	return alloc_page(gfp_flags);
}

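/*
 * Release pages pinned with get_user_pages(), marking them dirty if
 * the ttm was bound for write access, and return the accounted memory
 * to the global memory accounting.
 */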
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

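/*
 * Return the page at @index, allocating and accounting a fresh page if
 * the slot is still empty. New highmem pages are filled in from the
 * top of the page directory and lowmem pages from the bottom, so the
 * loop repeats until the slot at @index is populated.
 */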
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

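/*
 * As __ttm_tt_get_page(), but swap the ttm back in first if its pages
 * are currently swapped out.
 */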
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

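/*
 * Allocate all pages backing the ttm, then hand the page array to the
 * backend. On success the ttm moves to the tt_unbound state.
 */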
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}

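/*
 * Set the caching attributes of a single page in the kernel linear
 * map. Highmem pages have no linear mapping, so they need no
 * adjustment; on non-x86 architectures this is a no-op.
 */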
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}

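/*
 * Translate placement caching flags into a ttm_caching_state and apply
 * it to the ttm.
 */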
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

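/*
 * Free all pages allocated for the ttm, switching them back to cached
 * state first and warning if anyone else still holds a reference.
 */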
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

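/*
 * Destroy a ttm: tear down the backend, release all pages and the page
 * directory, and drop any non-persistent swap storage.
 */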
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

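/*
 * Pin the user pages backing a user-space buffer range into the ttm
 * page directory with get_user_pages().
 */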
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/*
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

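/*
 * Allocate and initialize a ttm for a buffer object, including its
 * page directory and driver backend. Returns NULL on failure.
 */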
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

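/*
 * Unbind a bound ttm from its backend, leaving its pages populated.
 */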
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

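/*
 * Populate the ttm if necessary and bind it to the given memory region
 * through its backend. User ttms are marked dirty on bind, since their
 * contents may be modified while bound.
 */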
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

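/*
 * Swap the ttm's pages back in. User ttms are simply re-pinned with
 * ttm_tt_set_user(); otherwise each page is copied back from the shmem
 * swap storage, which is then released unless it is persistent.
 */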
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL)) {
			page_cache_release(from_page);
			goto out_err;
		}

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return -ENOMEM;
}

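/*
 * Swap out the ttm's pages. User pages are simply unpinned; otherwise
 * each page is copied into shmem-backed swap storage, either freshly
 * allocated here or supplied by the caller as persistent storage.
 */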
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return -ENOMEM;
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page)))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return -ENOMEM;
}