linux/drivers/gpu/drm/ttm/ttm_tt.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * ttm_tt_create - allocate a ttm structure for the given BO
 * @bo: the buffer object to back with a ttm
 * @zero_alloc: true if the allocated pages should be zeroed
 *              (only honoured for device-type BOs)
 *
 * The BO's reservation object must be held. On success bo->ttm points
 * to the newly created ttm. Returns 0 on success, -EINVAL for an
 * unknown BO type, or -ENOMEM if the driver failed to allocate the ttm.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        uint32_t page_flags = 0;

        dma_resv_assert_held(bo->base.resv);

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        if (bdev->no_retry)
                page_flags |= TTM_PAGE_FLAG_NO_RETRY;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                break;
        case ttm_bo_type_kernel:
                break;
        case ttm_bo_type_sg:
                page_flags |= TTM_PAGE_FLAG_SG;
                break;
        default:
                bo->ttm = NULL;
                pr_err("Illegal buffer object type\n");
                return -EINVAL;
        }

        bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
        if (unlikely(bo->ttm == NULL))
                return -ENOMEM;

        return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
                        GFP_KERNEL | __GFP_ZERO);
        if (!ttm->pages)
                return -ENOMEM;
        return 0;
}

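/*
 * The DMA variant needs a struct page pointer and a DMA address per page,
 * so both arrays are carved out of a single allocation: each entry is
 * sized for one page pointer plus one dma_addr_t, and dma_address is then
 * pointed just past the end of the pages array.
 */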
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
                                          sizeof(*ttm->ttm.pages) +
                                          sizeof(*ttm->dma_address),
                                          GFP_KERNEL | __GFP_ZERO);
        if (!ttm->ttm.pages)
                return -ENOMEM;
        ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
        return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
                                          sizeof(*ttm->dma_address),
                                          GFP_KERNEL | __GFP_ZERO);
        if (!ttm->dma_address)
                return -ENOMEM;
        return 0;
}

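/*
 * Transition a single page between caching states. Highmem pages have no
 * linear kernel mapping to adjust, so they are skipped. A page that is
 * not currently write-back cached is first returned to write-back so its
 * old memtype is released before the new one is applied.
 */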
static int ttm_tt_set_page_caching(struct page *p,
                                   enum ttm_caching_state c_old,
                                   enum ttm_caching_state c_new)
{
        int ret = 0;

        if (PageHighMem(p))
                return 0;

        if (c_old != tt_cached) {
                /*
                 * p isn't in the default caching state, set it to
                 * writeback first to free its current memtype.
                 */
                ret = ttm_set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                ret = ttm_set_pages_wc(p, 1);
        else if (c_new == tt_uncached)
                ret = ttm_set_pages_uc(p, 1);

        return ret;
}

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        /* Roll the already converted pages back to the previous state. */
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

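/**
 * ttm_tt_set_placement_caching - apply a placement's caching flags to a ttm
 * @ttm: the ttm to change
 * @placement: TTM_PL_FLAG_* placement flags
 *
 * Picks write-combined, uncached or (by default) cached mode from the
 * placement flags and switches the ttm's pages over to it.
 * Returns 0 on success or a negative error code on failure.
 */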
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

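/*
 * Tear a ttm down completely: unbind it if necessary, release its pages,
 * drop any non-persistent swap storage, and finally let the backend free
 * the structure itself.
 */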
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (ttm == NULL)
                return;

        ttm_tt_unbind(ttm);

        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        ttm->swap_storage = NULL;
        ttm->func->destroy(ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
                               struct ttm_buffer_object *bo,
                               uint32_t page_flags)
{
        ttm->bdev = bo->bdev;
        ttm->num_pages = bo->num_pages;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;
        ttm->sg = bo->sg;
}

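/*
 * Initialize a driver-embedded struct ttm_tt for @bo. Typically called
 * from a driver's ttm_tt_create hook, along these lines (an illustrative
 * sketch only; my_tt and my_ttm_backend_func are hypothetical names):
 *
 *      struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
 *                                      uint32_t page_flags)
 *      {
 *              struct my_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *              if (!tt)
 *                      return NULL;
 *              tt->ttm.func = &my_ttm_backend_func;
 *              if (ttm_tt_init(&tt->ttm, bo, page_flags)) {
 *                      kfree(tt);
 *                      return NULL;
 *              }
 *              return &tt->ttm;
 *      }
 */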
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                uint32_t page_flags)
{
        ttm_tt_init_fields(ttm, bo, page_flags);

        if (ttm_tt_alloc_page_directory(ttm)) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
        kvfree(ttm->pages);
        ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

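/*
 * Like ttm_tt_init(), but also sets up the combined page/DMA-address
 * directory used by drivers that need a dma_addr_t per page.
 */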
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
                    uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm_tt_init_fields(ttm, bo, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

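/*
 * Variant of ttm_dma_tt_init() for buffer objects backed by an sg table:
 * with TTM_PAGE_FLAG_SG set the pages come from the sg table, so only the
 * DMA address array is allocated here.
 */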
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
                   uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;
        int ret;

        ttm_tt_init_fields(ttm, bo, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (page_flags & TTM_PAGE_FLAG_SG)
                ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
        else
                ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (ret) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        /*
         * pages and dma_address may share a single allocation, in which
         * case freeing the pages array frees both. Only the sg variant
         * allocates dma_address on its own.
         */
        if (ttm->pages)
                kvfree(ttm->pages);
        else
                kvfree(ttm_dma->dma_address);
        ttm->pages = NULL;
        ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

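/*
 * Unbind the ttm through the backend's unbind hook; a failure there is
 * treated as fatal (BUG_ON).
 */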
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

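/**
 * ttm_tt_bind - populate a ttm and bind it to a memory region
 * @ttm: the ttm to bind
 * @bo_mem: the memory region to bind it to
 * @ctx: operation context for the page allocation
 *
 * Makes sure the ttm's pages are allocated, then hands it to the
 * backend's bind hook. A no-op if the ttm is already bound.
 * Returns 0 on success or a negative error code.
 */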
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
                struct ttm_operation_ctx *ctx)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm_tt_populate(ttm, ctx);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

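/*
 * Copy the ttm's pages back in from its shmem swap storage. Unless the
 * storage is persistent it is dropped afterwards and the SWAPPED flag is
 * cleared.
 */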
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
                        gfp_mask |= __GFP_RETRY_MAYFAIL;

                from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;

                copy_highpage(to_page, from_page);
                put_page(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        return ret;
}

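/*
 * Copy the ttm's pages out to shmem-backed swap storage and unpopulate
 * it. If @persistent_swap_storage is NULL a new shmem file is created;
 * otherwise the caller-supplied file is reused and the ttm is marked
 * TTM_PAGE_FLAG_PERSISTENT_SWAP so the storage isn't dropped on destroy.
 */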
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (IS_ERR(swap_storage)) {
                        pr_err("Failed allocating swap storage\n");
                        return PTR_ERR(swap_storage);
                }
        } else {
                swap_storage = persistent_swap_storage;
        }

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
                        gfp_mask |= __GFP_RETRY_MAYFAIL;

                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;

                to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
                if (IS_ERR(to_page)) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                copy_highpage(to_page, from_page);
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                put_page(to_page);
        }

        ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;
out_err:
        if (!persistent_swap_storage)
                fput(swap_storage);

        return ret;
}

static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
        pgoff_t i;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i)
                ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}

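/*
 * Allocate the backing pages for a ttm, preferring the driver's own
 * populate hook and falling back to the generic pool allocator, then
 * point the pages' mapping at the device's address space.
 */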
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (ttm->bdev->driver->ttm_tt_populate)
                ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
        else
                ret = ttm_pool_populate(ttm, ctx);
        if (!ret)
                ttm_tt_add_mapping(ttm);
        return ret;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
        pgoff_t i;
        struct page **page = ttm->pages;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i) {
                (*page)->mapping = NULL;
                (*page++)->index = 0;
        }
}

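/*
 * Release a ttm's backing pages: detach them from the device mapping
 * first, then free them through the driver's unpopulate hook or the
 * generic pool.
 */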
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        if (ttm->state == tt_unpopulated)
                return;

        ttm_tt_clear_mapping(ttm);
        if (ttm->bdev->driver->ttm_tt_unpopulate)
                ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        else
                ttm_pool_unpopulate(ttm);
}