linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
{
        return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
        return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * CPU-visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently used on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

        return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of
 * CPU-visible VRAM currently used.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

        return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
}

/**
 * DOC: mem_info_vram_vendor
 *
 * The amdgpu driver provides a sysfs API for reporting the VRAM vendor.
 * The file mem_info_vram_vendor is used for this and returns the vendor
 * name as a lower-case string, or "unknown" if the vendor is not recognized.
 */
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
                                                 struct device_attribute *attr,
                                                 char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        switch (adev->gmc.vram_vendor) {
        case SAMSUNG:
                return sysfs_emit(buf, "samsung\n");
        case INFINEON:
                return sysfs_emit(buf, "infineon\n");
        case ELPIDA:
                return sysfs_emit(buf, "elpida\n");
        case ETRON:
                return sysfs_emit(buf, "etron\n");
        case NANYA:
                return sysfs_emit(buf, "nanya\n");
        case HYNIX:
                return sysfs_emit(buf, "hynix\n");
        case MOSEL:
                return sysfs_emit(buf, "mosel\n");
        case WINBOND:
                return sysfs_emit(buf, "winbond\n");
        case ESMT:
                return sysfs_emit(buf, "esmt\n");
        case MICRON:
                return sysfs_emit(buf, "micron\n");
        default:
                return sysfs_emit(buf, "unknown\n");
        }
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
                   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
                   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
                   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
                   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
                   amdgpu_mem_info_vram_vendor, NULL);

static const struct attribute *amdgpu_vram_mgr_attributes[] = {
        &dev_attr_mem_info_vram_total.attr,
        &dev_attr_mem_info_vis_vram_total.attr,
        &dev_attr_mem_info_vram_used.attr,
        &dev_attr_mem_info_vis_vram_used.attr,
        &dev_attr_mem_info_vram_vendor.attr,
        NULL
};

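/*
 * From userspace these attributes appear under the PCI device's sysfs
 * directory, e.g. /sys/class/drm/card0/device/mem_info_vram_used (the
 * card index is an example and varies per system).  A minimal hypothetical
 * reader:
 *
 *      int fd = open("/sys/class/drm/card0/device/mem_info_vram_used",
 *                    O_RDONLY);
 *      char buf[32] = {};
 *      if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
 *              printf("VRAM used: %llu bytes\n", strtoull(buf, NULL, 10));
 *
 * Each file returns a single decimal byte count terminated by '\n'.
 */
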
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func;

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        struct ttm_resource_manager *man = &mgr->manager;
        int ret;

        ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);

        man->func = &amdgpu_vram_mgr_func;

        drm_mm_init(&mgr->mm, 0, man->size);
        spin_lock_init(&mgr->lock);
        INIT_LIST_HEAD(&mgr->reservations_pending);
        INIT_LIST_HEAD(&mgr->reserved_pages);

        /* Add the VRAM-related sysfs files */
        ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
        if (ret)
                DRM_ERROR("Failed to register sysfs\n");

        ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
        ttm_resource_manager_set_used(man, true);
        return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the VRAM manager.  If evicting the remaining buffers
 * fails, teardown is aborted and the manager is left in place.
 */
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        struct ttm_resource_manager *man = &mgr->manager;
        int ret;
        struct amdgpu_vram_reservation *rsv, *temp;

        ttm_resource_manager_set_used(man, false);

        ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
        if (ret)
                return;

        spin_lock(&mgr->lock);
        list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
                kfree(rsv);

        list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
                drm_mm_remove_node(&rsv->mm_node);
                kfree(rsv);
        }
        drm_mm_takedown(&mgr->mm);
        spin_unlock(&mgr->lock);

        sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);

        ttm_resource_manager_cleanup(man);
        ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible node size
 *
 * @adev: amdgpu_device pointer
 * @node: MM node structure
 *
 * Calculate how many bytes of the MM node are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
                                    struct drm_mm_node *node)
{
        uint64_t start = node->start << PAGE_SHIFT;
        uint64_t end = (node->size + node->start) << PAGE_SHIFT;

        if (start >= adev->gmc.visible_vram_size)
                return 0;

        return (end > adev->gmc.visible_vram_size ?
                adev->gmc.visible_vram_size : end) - start;
}
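
/*
 * Worked example (illustrative only, assuming 4 KiB pages and 256 MiB of
 * CPU-visible VRAM, i.e. a 268435456 byte window): a node with
 * node->start = 65024 and node->size = 1024 spans bytes
 * [266338304, 270532608).  Its first 512 pages lie below the visible
 * boundary, so the function returns 2097152 bytes (2 MiB); a node that
 * starts at page 65536 or above returns 0.
 */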

/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_resource *mem = &bo->tbo.mem;
        struct drm_mm_node *nodes = mem->mm_node;
        unsigned pages = mem->num_pages;
        u64 usage;

        if (amdgpu_gmc_vram_full_visible(&adev->gmc))
                return amdgpu_bo_size(bo);

        if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
                return 0;

        for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
                usage += amdgpu_vram_mgr_vis_size(adev, nodes);

        return usage;
}

static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        struct drm_mm *mm = &mgr->mm;
        struct amdgpu_vram_reservation *rsv, *temp;
        uint64_t vis_usage;

        list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
                if (drm_mm_reserve_node(mm, &rsv->mm_node))
                        continue;

                dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
                        rsv->mm_node.start, rsv->mm_node.size);

                vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
                atomic64_add(vis_usage, &mgr->vis_usage);
                atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
                list_move(&rsv->node, &mgr->reserved_pages);
        }
}

/**
 * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
 *
 * @man: TTM memory type manager
 * @start: start address of the range in VRAM
 * @size: size of the range
 *
 * Reserve memory from start address with the specified size in VRAM
 */
int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
                                  uint64_t start, uint64_t size)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_vram_reservation *rsv;

        rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
        if (!rsv)
                return -ENOMEM;

        INIT_LIST_HEAD(&rsv->node);
        rsv->mm_node.start = start >> PAGE_SHIFT;
        rsv->mm_node.size = size >> PAGE_SHIFT;

        spin_lock(&mgr->lock);
        list_add_tail(&rsv->node, &mgr->reservations_pending);
        amdgpu_vram_mgr_do_reserve(man);
        spin_unlock(&mgr->lock);

        return 0;
}
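
/*
 * Example (a sketch, not code from this file): RAS bad-page retirement is
 * the typical caller.  Given a retired page address, the range is carved
 * out of the allocator like this ("bad_addr" is a hypothetical variable):
 *
 *      struct ttm_resource_manager *man =
 *              ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 *
 *      if (amdgpu_vram_mgr_reserve_range(man, bad_addr, PAGE_SIZE))
 *              dev_warn(adev->dev, "failed to reserve bad page\n");
 *
 * The range is only queued here; amdgpu_vram_mgr_do_reserve() actually
 * takes it out of &mgr->mm, either immediately or later from
 * amdgpu_vram_mgr_del() once a conflicting allocation is freed.
 */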

/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status
 *
 * @man: TTM memory type manager
 * @start: start address of a page in VRAM
 *
 * Returns:
 *      -EBUSY: the page is still held in the pending list
 *      0: the page has been reserved
 *      -ENOENT: the input page is not a reservation
 */
int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
                                      uint64_t start)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_vram_reservation *rsv;
        int ret;

        spin_lock(&mgr->lock);

        list_for_each_entry(rsv, &mgr->reservations_pending, node) {
                if ((rsv->mm_node.start <= start) &&
                    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
                        ret = -EBUSY;
                        goto out;
                }
        }

        list_for_each_entry(rsv, &mgr->reserved_pages, node) {
                if ((rsv->mm_node.start <= start) &&
                    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
                        ret = 0;
                        goto out;
                }
        }

        ret = -ENOENT;
out:
        spin_unlock(&mgr->lock);
        return ret;
}

/**
 * amdgpu_vram_mgr_virt_start - update virtual start address
 *
 * @mem: ttm_resource to update
 * @node: just allocated node
 *
 * Calculate a virtual BO start address to easily check if everything is CPU
 * accessible.
 */
static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
                                       struct drm_mm_node *node)
{
        unsigned long start;

        start = node->start + node->size;
        if (start > mem->num_pages)
                start -= mem->num_pages;
        else
                start = 0;
        mem->start = max(mem->start, start);
}
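
/*
 * Worked example (illustrative): for a BO with num_pages = 100 made of two
 * nodes, one at start = 50, size = 50 and one at start = 200, size = 50,
 * the first node gives start = 0 (its end, 100, is not above num_pages)
 * and the second gives start = 250 - 100 = 150, so mem->start ends up as
 * 150.  The virtual start is thus chosen so that mem->start + num_pages
 * covers the highest node end, letting callers test CPU visibility with a
 * single range check instead of walking every node.
 */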

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                               struct ttm_buffer_object *tbo,
                               const struct ttm_place *place,
                               struct ttm_resource *mem)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        struct drm_mm *mm = &mgr->mm;
        struct drm_mm_node *nodes;
        enum drm_mm_insert_mode mode;
        unsigned long lpfn, num_nodes, pages_per_node, pages_left;
        uint64_t vis_usage = 0, mem_bytes, max_bytes;
        unsigned i;
        int r;

        lpfn = place->lpfn;
        if (!lpfn)
                lpfn = man->size;

        max_bytes = adev->gmc.mc_vram_size;
        if (tbo->type != ttm_bo_type_kernel)
                max_bytes -= AMDGPU_VM_RESERVED_VRAM;

        /* bail out quickly if there's likely not enough VRAM for this BO */
        mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
        if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
                atomic64_sub(mem_bytes, &mgr->usage);
                return -ENOSPC;
        }

        if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
                pages_per_node = ~0ul;
                num_nodes = 1;
        } else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                pages_per_node = HPAGE_PMD_NR;
#else
                /* default to 2MB */
                pages_per_node = (2UL << (20UL - PAGE_SHIFT));
#endif
                pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
                num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
        }

        nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
                               GFP_KERNEL | __GFP_ZERO);
        if (!nodes) {
                atomic64_sub(mem_bytes, &mgr->usage);
                return -ENOMEM;
        }

        mode = DRM_MM_INSERT_BEST;
        if (place->flags & TTM_PL_FLAG_TOPDOWN)
                mode = DRM_MM_INSERT_HIGH;

        mem->start = 0;
        pages_left = mem->num_pages;

        spin_lock(&mgr->lock);
        for (i = 0; pages_left >= pages_per_node; ++i) {
                unsigned long pages = rounddown_pow_of_two(pages_left);

                /* Limit maximum size to 2GB due to SG table limitations */
                pages = min(pages, (2UL << (30 - PAGE_SHIFT)));

                r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
                                                pages_per_node, 0,
                                                place->fpfn, lpfn,
                                                mode);
                if (unlikely(r))
                        break;

                vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
                amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
                pages_left -= pages;
        }

        for (; pages_left; ++i) {
                unsigned long pages = min(pages_left, pages_per_node);
                uint32_t alignment = mem->page_alignment;

                if (pages == pages_per_node)
                        alignment = pages_per_node;

                r = drm_mm_insert_node_in_range(mm, &nodes[i],
                                                pages, alignment, 0,
                                                place->fpfn, lpfn,
                                                mode);
                if (unlikely(r))
                        goto error;

                vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
                amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
                pages_left -= pages;
        }
        spin_unlock(&mgr->lock);

        atomic64_add(vis_usage, &mgr->vis_usage);

        mem->mm_node = nodes;

        return 0;

error:
        while (i--)
                drm_mm_remove_node(&nodes[i]);
        spin_unlock(&mgr->lock);
        atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);

        kvfree(nodes);
        return r;
}
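
/*
 * Worked example (illustrative, assuming 4 KiB pages, so
 * pages_per_node = 512): a non-contiguous 9 MiB request is 2304 pages.
 * The first loop places rounddown_pow_of_two(2304) = 2048 pages in one
 * node, leaving 256 pages; since 256 < 512, the second loop places the
 * remaining 256 pages in a second node using the BO's page alignment.
 * A TTM_PL_FLAG_CONTIGUOUS request instead uses a single node for the
 * whole size.
 */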

/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
                                struct ttm_resource *mem)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        struct drm_mm_node *nodes = mem->mm_node;
        uint64_t usage = 0, vis_usage = 0;
        unsigned pages = mem->num_pages;

        if (!mem->mm_node)
                return;

        spin_lock(&mgr->lock);
        while (pages) {
                pages -= nodes->size;
                drm_mm_remove_node(nodes);
                usage += nodes->size << PAGE_SHIFT;
                vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
                ++nodes;
        }
        amdgpu_vram_mgr_do_reserve(man);
        spin_unlock(&mgr->lock);

        atomic64_sub(usage, &mgr->usage);
        atomic64_sub(vis_usage, &mgr->vis_usage);

        kvfree(mem->mm_node);
        mem->mm_node = NULL;
}

/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @mem: TTM memory object
 * @offset: byte offset from the base of VRAM BO
 * @length: number of bytes to export in sg_table
 * @dev: the other device
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
                              struct ttm_resource *mem,
                              u64 offset, u64 length,
                              struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table **sgt)
{
        struct amdgpu_res_cursor cursor;
        struct scatterlist *sg;
        int num_entries = 0;
        int i, r;

        *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
        if (!*sgt)
                return -ENOMEM;

        /* Determine the number of DRM_MM nodes to export */
        amdgpu_res_first(mem, offset, length, &cursor);
        while (cursor.remaining) {
                num_entries++;
                amdgpu_res_next(&cursor, cursor.size);
        }

        r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
        if (r)
                goto error_free;

        /* Initialize scatterlist nodes of sg_table */
        for_each_sgtable_sg((*sgt), sg, i)
                sg->length = 0;

        /*
         * Walk down DRM_MM nodes to populate scatterlist nodes
         * @note: Use iterator api to get first the DRM_MM node
         * and the number of bytes from it. Access the following
         * DRM_MM node(s) if more buffer needs to be exported
         */
        amdgpu_res_first(mem, offset, length, &cursor);
        for_each_sgtable_sg((*sgt), sg, i) {
                phys_addr_t phys = cursor.start + adev->gmc.aper_base;
                size_t size = cursor.size;
                dma_addr_t addr;

                addr = dma_map_resource(dev, phys, size, dir,
                                        DMA_ATTR_SKIP_CPU_SYNC);
                r = dma_mapping_error(dev, addr);
                if (r)
                        goto error_unmap;

                sg_set_page(sg, NULL, size, 0);
                sg_dma_address(sg) = addr;
                sg_dma_len(sg) = size;

                amdgpu_res_next(&cursor, cursor.size);
        }

        return 0;

error_unmap:
        for_each_sgtable_sg((*sgt), sg, i) {
                if (!sg->length)
                        continue;

                dma_unmap_resource(dev, sg->dma_address,
                                   sg->length, dir,
                                   DMA_ATTR_SKIP_CPU_SYNC);
        }
        sg_free_table(*sgt);

error_free:
        kfree(*sgt);
        return r;
}
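
/*
 * Example (a sketch of how the dma-buf export path pairs these two calls;
 * the surrounding variables are assumptions, not code from this file):
 *
 *      struct sg_table *sgt;
 *      int r;
 *
 *      r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
 *                                    bo->tbo.base.size, attach->dev,
 *                                    DMA_BIDIRECTIONAL, &sgt);
 *      if (r)
 *              return ERR_PTR(r);
 *      ...
 *      amdgpu_vram_mgr_free_sgt(attach->dev, DMA_BIDIRECTIONAL, sgt);
 *
 * Each scatterlist entry carries only a DMA address and length
 * (sg_page() is NULL), so the table must not be handed to code that
 * expects struct page backing.
 */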

/**
 * amdgpu_vram_mgr_free_sgt - free an sg table
 *
 * @dev: device pointer
 * @dir: data direction of resource to unmap
 * @sgt: sg table to free
 *
 * Free a previously allocated sg table.
 */
void amdgpu_vram_mgr_free_sgt(struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i;

        for_each_sgtable_sg(sgt, sg, i)
                dma_unmap_resource(dev, sg->dma_address,
                                   sg->length, dir,
                                   DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(sgt);
}

/**
 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in this domain.
 */
uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

        return atomic64_read(&mgr->usage);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

        return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using printk.
 */
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
                                  struct drm_printer *printer)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

        spin_lock(&mgr->lock);
        drm_mm_print(&mgr->mm, printer);
        spin_unlock(&mgr->lock);

        drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
                   man->size, amdgpu_vram_mgr_usage(man) >> 20,
                   amdgpu_vram_mgr_vis_usage(man) >> 20);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
        .alloc  = amdgpu_vram_mgr_new,
        .free   = amdgpu_vram_mgr_del,
        .debug  = amdgpu_vram_mgr_debug
};