linux/drivers/gpu/drm/i915/i915_gem_stolen.c
/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

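/**
 * i915_gem_stolen_insert_node_in_range - reserve a range of stolen memory
 * @dev_priv: i915 device private
 * @node: drm_mm_node to fill in with the allocated range
 * @size: size of the allocation in bytes
 * @alignment: required alignment of the allocation
 * @start: lowest acceptable offset within stolen memory
 * @end: highest acceptable offset within stolen memory
 *
 * Returns 0 on success, -ENODEV if the stolen allocator has not been
 * initialised, or the error returned by drm_mm_insert_node_in_range().
 */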
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                                         struct drm_mm_node *node, u64 size,
                                         unsigned alignment, u64 start, u64 end)
{
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
                                          size, alignment, 0,
                                          start, end, DRM_MM_INSERT_BEST);
        mutex_unlock(&dev_priv->mm.stolen_lock);

        return ret;
}

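/* Convenience wrapper: allocate anywhere within stolen memory. */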
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment)
{
        return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
                                                    alignment, 0, U64_MAX);
}

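/* Return a range previously reserved by the insert helpers above. */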
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
                                 struct drm_mm_node *node)
{
        mutex_lock(&dev_priv->mm.stolen_lock);
        drm_mm_remove_node(node);
        mutex_unlock(&dev_priv->mm.stolen_lock);
}

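/*
 * Determine the base of stolen memory by probing the per-generation
 * registers described below. Returns the DMA address of the first byte
 * of stolen memory, or 0 if the base could not be determined or the
 * region could not be reserved against conflicts.
 */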
static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct resource *r;
        dma_addr_t base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at register BSM (0x5c) in the igfx configuration space. On a few
         * (desktop) machines this is also mirrored in the bridge device at
         * different locations, or in the MCHBAR.
         *
         * On 865 we just check the TOUD register.
         *
         * On 830/845/85x the stolen memory base isn't available in any
         * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
         */
        base = 0;
        if (INTEL_GEN(dev_priv) >= 3) {
                u32 bsm;

                pci_read_config_dword(pdev, INTEL_BSM, &bsm);

                base = bsm & INTEL_BSM_MASK;
        } else if (IS_I865G(dev_priv)) {
                u32 tseg_size = 0;
                u16 toud = 0;
                u8 tmp;

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I845_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE) {
                        switch (tmp & I845_TSEG_SIZE_MASK) {
                        case I845_TSEG_SIZE_512K:
                                tseg_size = KB(512);
                                break;
                        case I845_TSEG_SIZE_1M:
                                tseg_size = MB(1);
                                break;
                        }
                }

                pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
                                         I865_TOUD, &toud);

                base = (toud << 16) + tseg_size;
        } else if (IS_I85X(dev_priv)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I85X_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE)
                        tseg_size = MB(1);

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
                                         I85X_DRB3, &tmp);
                tom = tmp * MB(32);

                base = tom - tseg_size - ggtt->stolen_size;
        } else if (IS_I845G(dev_priv)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I845_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE) {
                        switch (tmp & I845_TSEG_SIZE_MASK) {
                        case I845_TSEG_SIZE_512K:
                                tseg_size = KB(512);
                                break;
                        case I845_TSEG_SIZE_1M:
                                tseg_size = MB(1);
                                break;
                        }
                }

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);

                base = tom - tseg_size - ggtt->stolen_size;
        } else if (IS_I830(dev_priv)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE) {
                        if (tmp & I830_TSEG_SIZE_1M)
                                tseg_size = MB(1);
                        else
                                tseg_size = KB(512);
                }

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);

                base = tom - tseg_size - ggtt->stolen_size;
        }

        if (base == 0 || add_overflows(base, ggtt->stolen_size))
                return 0;

        /* Make sure we don't clobber the GTT if it's within stolen memory */
        if (INTEL_GEN(dev_priv) <= 4 &&
            !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
                struct {
                        dma_addr_t start, end;
                } stolen[2] = {
                        { .start = base, .end = base + ggtt->stolen_size, },
                        { .start = base, .end = base + ggtt->stolen_size, },
                };
                u64 ggtt_start, ggtt_end;

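                /*
                 * Read back the GTT base from PGTBL_CTL; on gen4 the
                 * address is split into separate low and high fields
                 * that must be recombined.
                 */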
                ggtt_start = I915_READ(PGTBL_CTL);
                if (IS_GEN4(dev_priv))
                        ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
                                     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        ggtt_start &= PGTBL_ADDRESS_LO_MASK;
                ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

                if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
                        stolen[0].end = ggtt_start;
                if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
                        stolen[1].start = ggtt_end;

                /* Pick the larger of the two chunks */
                if (stolen[0].end - stolen[0].start >
                    stolen[1].end - stolen[1].start) {
                        base = stolen[0].start;
                        ggtt->stolen_size = stolen[0].end - stolen[0].start;
                } else {
                        base = stolen[1].start;
                        ggtt->stolen_size = stolen[1].end - stolen[1].start;
                }

                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        dma_addr_t end = base + ggtt->stolen_size - 1;

                        DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
                                      (unsigned long long)ggtt_start,
                                      (unsigned long long)ggtt_end - 1);
                        DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
                                      &base, &end);
                }
        }

        /*
         * Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * One more attempt, but this time requesting the region
                 * from base + 1 instead, as we have seen that this resolves
                 * the region conflict with the PCI bus.
                 * This is a BIOS w/a: some BIOSes wrap stolen in the root
                 * PCI bus, but have an off-by-one error. Hence retry the
                 * reservation starting from 1 instead of 0. There are also
                 * BIOSes with an off-by-one at the other end, so shrink the
                 * reservation by a byte on each side.
                 */
                r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
                                            ggtt->stolen_size - 2,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
                if (r == NULL && !IS_GEN3(dev_priv)) {
                        dma_addr_t end = base + ggtt->stolen_size;

                        DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
                                  &base, &end);
                        base = 0;
                }
        }

        return base;
}

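/**
 * i915_gem_cleanup_stolen - tear down the stolen memory allocator
 * @dev: drm device
 *
 * Safe to call even if i915_gem_init_stolen() left the allocator
 * uninitialised.
 */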
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        drm_mm_takedown(&dev_priv->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    dma_addr_t *base, u32 *size)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
                                     CTG_STOLEN_RESERVED :
                                     ELK_STOLEN_RESERVED);
        dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

        *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

        WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

        /*
         * On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and
         * there's nothing reserved.
         */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     dma_addr_t *base, u32 *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
        case GEN6_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_512K:
                *size = 512 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_128K:
                *size = 128 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     dma_addr_t *base, u32 *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN7_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    dma_addr_t *base, u32 *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
        case GEN8_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_2M:
                *size = 2 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_4M:
                *size = 4 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_8M:
                *size = 8 * 1024 * 1024;
                break;
        default:
                *size = 8 * 1024 * 1024;
                MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    dma_addr_t *base, u32 *size)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
        dma_addr_t stolen_top;

        stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        /*
         * On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and
         * there's nothing reserved.
         */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}

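/**
 * i915_gem_init_stolen - set up the stolen memory allocator
 * @dev_priv: i915 device private
 *
 * Locate stolen memory, carve out the firmware-reserved portion at its
 * top, and initialise the drm_mm allocator over the usable remainder.
 * Always returns 0; if stolen memory is unusable it is simply left
 * disabled.
 */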
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        dma_addr_t reserved_base, stolen_top;
        u32 reserved_total, reserved_size;
        u32 stolen_usable_start;

        mutex_init(&dev_priv->mm.stolen_lock);

        if (intel_vgpu_active(dev_priv)) {
                DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
                return 0;
        }

        if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
                DRM_INFO("DMAR active, disabling use of stolen memory\n");
                return 0;
        }

        if (ggtt->stolen_size == 0)
                return 0;

        dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
        reserved_base = 0;
        reserved_size = 0;

        switch (INTEL_INFO(dev_priv)->gen) {
        case 2:
        case 3:
                break;
        case 4:
                if (IS_G4X(dev_priv))
                        g4x_get_stolen_reserved(dev_priv,
                                                &reserved_base, &reserved_size);
                break;
        case 5:
                /* Assume the gen6 maximum for the older platforms. */
                reserved_size = 1024 * 1024;
                reserved_base = stolen_top - reserved_size;
                break;
        case 6:
                gen6_get_stolen_reserved(dev_priv,
                                         &reserved_base, &reserved_size);
                break;
        case 7:
                gen7_get_stolen_reserved(dev_priv,
                                         &reserved_base, &reserved_size);
                break;
        default:
                if (IS_LP(dev_priv))
                        chv_get_stolen_reserved(dev_priv,
                                                &reserved_base, &reserved_size);
                else
                        bdw_get_stolen_reserved(dev_priv,
                                                &reserved_base, &reserved_size);
                break;
        }

        /*
         * It is possible for the reserved base to be zero, but the register
         * field for size doesn't have a zero option.
         */
        if (reserved_base == 0) {
                reserved_size = 0;
                reserved_base = stolen_top;
        }

        if (reserved_base < dev_priv->mm.stolen_base ||
            reserved_base + reserved_size > stolen_top) {
                dma_addr_t reserved_top = reserved_base + reserved_size;

                DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
                              &reserved_base, &reserved_top,
                              &dev_priv->mm.stolen_base, &stolen_top);
                return 0;
        }

        ggtt->stolen_reserved_base = reserved_base;
        ggtt->stolen_reserved_size = reserved_size;

        /*
         * It is possible for the reserved area to end before the end of
         * stolen memory, so just consider the start.
         */
        reserved_total = stolen_top - reserved_base;

        DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
                      ggtt->stolen_size >> 10,
                      (ggtt->stolen_size - reserved_total) >> 10);

        stolen_usable_start = 0;
        /* WaSkipStolenMemoryFirstPage:bdw+ */
        if (INTEL_GEN(dev_priv) >= 8)
                stolen_usable_start = 4096;

        ggtt->stolen_usable_size =
                ggtt->stolen_size - reserved_total - stolen_usable_start;

        /* Basic memrange allocator for stolen space. */
        drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
                    ggtt->stolen_usable_size);

        return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct sg_table *st;
        struct scatterlist *sg;

        GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));

        /*
         * We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return ERR_PTR(-ENOMEM);

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return ERR_PTR(-ENOMEM);
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}

static struct sg_table *
i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        return i915_pages_create_for_stolen(obj->base.dev,
                                            obj->stolen->start,
                                            obj->stolen->size);
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        /* Should only be called from i915_gem_object_release_stolen() */
        sg_free_table(pages);
        kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

        GEM_BUG_ON(!stolen);

        __i915_gem_object_unpin_pages(obj);

        i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};

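/*
 * Wrap an already-reserved stolen range in a GEM object. On success the
 * object owns @stolen and frees it on release; on failure the caller
 * retains ownership of the node.
 */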
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;
        unsigned int cache_level;

        obj = i915_gem_object_alloc(dev_priv);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->stolen = stolen;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);

        if (i915_gem_object_pin_pages(obj))
                goto cleanup;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}

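/**
 * i915_gem_object_create_stolen - allocate an object backed by stolen memory
 * @dev_priv: i915 device private
 * @size: requested size in bytes
 *
 * Returns the new object, or NULL if the stolen allocator is uninitialised,
 * @size is zero, or no suitable range could be reserved.
 */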
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
{
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        if (size == 0)
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
        if (ret) {
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev_priv, stolen);
        if (obj)
                return obj;

        i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
        return NULL;
}

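/**
 * i915_gem_object_create_stolen_for_preallocated - wrap a firmware allocation
 * @dev_priv: i915 device private
 * @stolen_offset: offset of the preallocated range within stolen memory
 * @gtt_offset: GGTT offset at which the range is already mapped, or
 *     I915_GTT_OFFSET_NONE if no GTT binding is needed
 * @size: size of the range in bytes
 *
 * Used to take over ranges that the BIOS/firmware has already carved out
 * of stolen memory, such as the pre-OS framebuffer. Returns the new
 * object, or NULL on failure.
 */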
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                      stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        if (WARN_ON(size == 0) ||
            WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
            WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        mutex_unlock(&dev_priv->mm.stolen_lock);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev_priv, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                i915_gem_stolen_remove_node(dev_priv, stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        vma = i915_vma_instance(obj, &ggtt->base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_pages;
        }

        /*
         * To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
                                   size, gtt_offset, obj->cache_level,
                                   0);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                goto err_pages;
        }

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

        vma->pages = obj->mm.pages;
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
        list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
        list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
        obj->bind_count++;

        return obj;

err_pages:
        i915_gem_object_unpin_pages(obj);
err:
        i915_gem_object_put(obj);
        return NULL;
}