linux/drivers/gpu/drm/ttm/ttm_tt.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
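
/*
 * Example (assuming TTM is built as the module "ttm"): both limits can be
 * overridden at load time, e.g.
 *
 *	modprobe ttm pages_limit=4194304
 *
 * or, when built in, on the kernel command line as ttm.pages_limit=4194304.
 * Left at zero, they default to the values handed to ttm_tt_mgr_init().
 */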

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
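
/*
 * Example: a minimal driver ->ttm_tt_create() backend, sketched after what
 * drivers like radeon do. The mydrv_* names are hypothetical; only the TTM
 * calls are real.
 *
 *	struct mydrv_ttm_tt {
 *		struct ttm_tt ttm;
 *	};
 *
 *	static struct ttm_tt *
 *	mydrv_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 *	{
 *		struct mydrv_ttm_tt *gtt;
 *
 *		gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);
 *		if (!gtt)
 *			return NULL;
 *
 *		if (ttm_tt_init(&gtt->ttm, bo, page_flags, ttm_cached)) {
 *			kfree(gtt);
 *			return NULL;
 *		}
 *		return &gtt->ttm;
 *	}
 */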

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(*ttm->pages),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

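/*
 * Allocate the page pointer and DMA address arrays in a single allocation;
 * ttm->dma_address points just past the last page pointer.
 */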
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages,
				    sizeof(*ttm->pages) +
				    sizeof(*ttm->dma_address),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unpopulate(bdev, ttm);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
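
/*
 * Example: the matching ->ttm_tt_destroy() backend for the create sketch
 * above (mydrv_* names are hypothetical):
 *
 *	static void mydrv_ttm_tt_destroy(struct ttm_device *bdev,
 *					 struct ttm_tt *ttm)
 *	{
 *		struct mydrv_ttm_tt *gtt =
 *			container_of(ttm, struct mydrv_ttm_tt, ttm);
 *
 *		ttm_tt_destroy_common(bdev, ttm);
 *		ttm_tt_fini(&gtt->ttm);
 *		kfree(gtt);
 *	}
 */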

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching)
{
	ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
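
/*
 * Example: a driver whose TTs also need a dma_address array would call
 * ttm_sg_tt_init() instead of ttm_tt_init() in its create hook, e.g.
 * (continuing the hypothetical mydrv sketch above):
 *
 *	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, ttm_write_combined)) {
 *		kfree(gtt);
 *		return NULL;
 *	}
 */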
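/**
 * ttm_tt_swapin - copy a swapped-out tt's contents back into its pages
 * @ttm: The struct ttm_tt.
 *
 * The pages must already have been repopulated by the caller; the data is
 * copied back from the shmem object and the swap storage is dropped.
 *
 * Return: 0 on success, negative error code otherwise.
 */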
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}
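
/*
 * Example of the caller side (modelled on ttm_bo_swapout(); locking and
 * eviction logic omitted, and num_swapped is a hypothetical counter): the
 * BO is reserved and a positive return value is the number of pages handed
 * over to shmem:
 *
 *	ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
 *	if (ret > 0)
 *		num_swapped += ret;
 */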

static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm_tt_add_mapping(bdev, ttm);
	ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
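
/*
 * Example: a driver ->ttm_tt_populate() hook that wraps the default pool
 * allocator and then post-processes the pages. mydrv_map_pages() is a
 * hypothetical stand-in for driver-specific work such as DMA mapping:
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_device *bdev,
 *					 struct ttm_tt *ttm,
 *					 struct ttm_operation_ctx *ctx)
 *	{
 *		int ret;
 *
 *		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = mydrv_map_pages(ttm);
 *		if (ret)
 *			ttm_pool_free(&bdev->pool, ttm);
 *		return ret;
 *	}
 */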

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif
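
/*
 * Example: reading the file above triggers one swapout attempt and prints
 * the number of pages swapped out (the path assumes the default debugfs
 * mount point and the "ttm" directory created at module init):
 *
 *	# cat /sys/kernel/debug/ttm/tt_shrink
 */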

/**
 * ttm_tt_mgr_init - set up the global page accounting limits
 * @num_pages: default limit for the number of pages that may be allocated
 * @num_dma32_pages: default limit for the number of DMA32 pages
 *
 * Initialize the limits used when deciding whether to swap out buffer
 * objects, unless they were already set via the module parameters, and
 * create the debugfs file used to test swapout.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}
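
/*
 * Example of the caller side (a simplified sketch modelled on
 * ttm_global_init(); the real sizing policy, in particular the DMA32 cap,
 * differs): the defaults are derived from total system memory, e.g. 50%:
 *
 *	struct sysinfo si;
 *	unsigned long num_pages;
 *
 *	si_meminfo(&si);
 *	num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
 *	ttm_tt_mgr_init(num_pages / 2, num_pages / 2);
 */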

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct dma_buf_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
							 iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct dma_buf_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
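
/*
 * Example usage (a sketch of how a move helper such as ttm_move_memcpy()
 * consumes the iterator to copy a single page; "buf" is a hypothetical
 * destination and error handling is omitted):
 *
 *	struct ttm_kmap_iter_tt iter_tt;
 *	struct ttm_kmap_iter *iter;
 *	struct dma_buf_map map;
 *
 *	iter = ttm_kmap_iter_tt_init(&iter_tt, tt);
 *	iter->ops->map_local(iter, &map, 0);
 *	memcpy(buf, map.vaddr, PAGE_SIZE);
 *	if (iter->ops->unmap_local)
 *		iter->ops->unmap_local(iter, &map);
 */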