linux/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct ttm_place vram_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }
};

static const struct ttm_place gmr_vram_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_vram_gmr_placement = {
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
        .num_placement = 1,
        .placement = &sys_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_evictable_placement = {
        .num_placement = 4,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_placement_flags,
        .busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_ne_placement_flags,
        .busy_placement = &mob_ne_placement_flags
};

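/*
 * Illustrative sketch, not part of the driver: a struct ttm_placement
 * pairs a preferred placement list with a "busy" fallback list that TTM
 * tries under memory pressure. With a reserved buffer object, validating
 * it into one of the placements above would look roughly like this
 * (the bo variable and surrounding error handling are assumed):
 *
 *      int ret;
 *
 *      ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 * On success the bo is backed by VRAM, or by a GMR when VRAM is contended.
 */
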
struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
};

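/*
 * Size of the TTM backend structure, exported so that other parts of the
 * driver can account for it up front when estimating buffer object
 * overhead (an assumption based on how the constant is named and used
 * outside this file).
 */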
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
        return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
        return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
        return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset, in pages, at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->addrs = vsgt->addrs;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                viter->page = &__vmw_piter_sg_page;
                __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}
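
/*
 * A minimal usage sketch (illustration only; it mirrors the
 * region-counting loop in vmw_ttm_map_dma below). Note how the iterator
 * is advanced before the first access, per the convention above:
 *
 *      struct vmw_piter iter;
 *
 *      for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *              dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *              ...
 *      }
 */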

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free DMA mappings previously set up by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;

        dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
                DMA_BIDIRECTIONAL);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it violates the DMA API in that once this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;
        int ret;

        ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
                         DMA_BIDIRECTIONAL);
        if (unlikely(ret == 0))
                return -ENOMEM;

        vmw_tt->sgt.nents = ret;

        return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage for
 * the device mappings. If a mapping has already been performed, indicated
 * by the storage pointer being non-NULL, the function returns early with
 * success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
                                           true);
                if (unlikely(ret != 0))
                        return ret;

                ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
                                                vsgt->num_pages, 0,
                                                (unsigned long)
                                                vsgt->num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;

                if (vsgt->num_pages > vmw_tt->sgt.nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.nents);

                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

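        /*
         * Count the number of DMA-contiguous regions: seed @old with an
         * address no page can immediately follow, so the first page always
         * opens a new region; after that, a new region starts whenever the
         * current DMA address is not exactly PAGE_SIZE past the previous one.
         */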
        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}


/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Tear down any device mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object must
 * not be freed by the caller.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return &vmw_tt->vsgt;
}
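
/*
 * Illustrative sketch, not part of the driver, of how the wrappers above
 * combine: with @bo reserved or pinned, map the pages and walk the
 * resulting table. program_device_page() is a hypothetical helper:
 *
 *      const struct vmw_sg_table *vsgt;
 *      struct vmw_piter iter;
 *      int ret;
 *
 *      ret = vmw_bo_map_dma(bo);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 *      vsgt = vmw_bo_sg_table(bo);
 *      for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *              program_device_page(vmw_piter_dma_addr(&iter));
 */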


static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        int ret;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;
        vmw_be->mem_type = bo_mem->mem_type;

        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
                return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                    ttm->num_pages, vmw_be->gmr_id);
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                vmw_mob_create(ttm->num_pages);
                        if (unlikely(vmw_be->mob == NULL))
                                return -ENOMEM;
                }

                return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                    &vmw_be->vsgt, ttm->num_pages,
                                    vmw_be->gmr_id);
        default:
                BUG();
        }
        return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
                break;
        default:
                BUG();
        }

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);

        return 0;
}


static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);

        if (vmw_be->mob)
                vmw_mob_destroy(vmw_be->mob);

        kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

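        /*
         * In coherent mode the dma_addr_t array is allocated and filled
         * together with the pages, so charge its size against the TTM
         * global memory accounting first; all other modes use the generic
         * TTM page pool.
         */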
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
                ret = ttm_mem_global_alloc(glob, size, false, true);
                if (unlikely(ret != 0))
                        return ret;

                ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                if (unlikely(ret != 0))
                        ttm_mem_global_free(glob, size);
        } else
                ret = ttm_pool_populate(ttm);

        return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
                vmw_tt->mob = NULL;
        }

        vmw_ttm_unmap_dma(vmw_tt);
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

                ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                ttm_mem_global_free(glob, size);
        } else
                ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
                                 unsigned long size, uint32_t page_flags,
                                 struct page *dummy_read_page)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
                                      dummy_read_page);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
                                  dummy_read_page);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm.ttm;
out_no_init:
        kfree(vmw_be);
        return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                /*
                 * "Guest Memory Regions" is an aperture-like feature with
                 * one slot per bo. There is an upper limit on both the
                 * number of slots and the bo size.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

        return vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                return 0;
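        /*
         * VRAM is the only memory type here that is mapped as I/O memory:
         * its bus address is the VRAM region base plus the buffer's
         * page offset within the region.
         */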
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
                            bool evict,
                            struct ttm_mem_reg *mem)
{
        vmw_resource_move_notify(bo, mem);
        vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
        (void) ttm_bo_wait(bo, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .move_notify = vmw_move_notify,
        .swap_notify = vmw_swap_notify,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};