linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "ummu.h"
#include "vmm.h"

#include <subdev/bar.h>
#include <subdev/fb.h>

#include <nvif/if500d.h>
#include <nvif/if900d.h>

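/* Parent allocation that page tables smaller than a full page are
 * sub-allocated from; a bitmask tracks which slots are still free.
 */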
struct nvkm_mmu_ptp {
        struct nvkm_mmu_pt *pt;
        struct list_head head;
        u8  shift;
        u16 mask;
        u16 free;
};

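/* Return a sub-allocated page table to its parent, destroying the
 * parent once all of its slots have been freed.
 */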
static void
nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
        const int slot = pt->base >> pt->ptp->shift;
        struct nvkm_mmu_ptp *ptp = pt->ptp;

        /* If there were no free slots in the parent allocation before,
         * there will be now, so return PTP to the cache.
         */
        if (!ptp->free)
                list_add(&ptp->head, &mmu->ptp.list);
        ptp->free |= BIT(slot);

        /* If there are no more sub-allocations, destroy PTP. */
        if (ptp->free == ptp->mask) {
                nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
                list_del(&ptp->head);
                kfree(ptp);
        }

        kfree(pt);
}

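/* Sub-allocate a small page table from a cached parent allocation,
 * creating a new 0x1000-byte parent when none has a free slot.
 */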
static struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
        struct nvkm_mmu_pt *pt;
        struct nvkm_mmu_ptp *ptp;
        int slot;

        if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
                return NULL;

        ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
        if (!ptp) {
                /* Need to allocate a new parent to sub-allocate from. */
                if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
                        kfree(pt);
                        return NULL;
                }

                ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
                if (!ptp->pt) {
                        kfree(ptp);
                        kfree(pt);
                        return NULL;
                }

                ptp->shift = order_base_2(size);
                slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
                ptp->mask = (1 << slot) - 1;
                ptp->free = ptp->mask;
                list_add(&ptp->head, &mmu->ptp.list);
        }
        pt->ptp = ptp;
        pt->sub = true;

        /* Sub-allocate from parent object, removing PTP from cache
         * if there are no free slots left.
         */
        slot = __ffs(ptp->free);
        ptp->free &= ~BIT(slot);
        if (!ptp->free)
                list_del(&ptp->head);

        pt->memory = pt->ptp->pt->memory;
        pt->base = slot << ptp->shift;
        pt->addr = pt->ptp->pt->addr + pt->base;
        return pt;
}

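/* Cache of recently freed page tables of a single size, kept around so
 * their backing memory can be reused.
 */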
struct nvkm_mmu_ptc {
        struct list_head head;
        struct list_head item;
        u32 size;
        u32 refs;
};

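/* Look up the cache bucket for a given page table size, creating it if
 * it doesn't exist yet.
 */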
static inline struct nvkm_mmu_ptc *
nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
        struct nvkm_mmu_ptc *ptc;

        list_for_each_entry(ptc, &mmu->ptc.list, head) {
                if (ptc->size == size)
                        return ptc;
        }

        ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
        if (ptc) {
                INIT_LIST_HEAD(&ptc->item);
                ptc->size = size;
                ptc->refs = 0;
                list_add(&ptc->head, &mmu->ptc.list);
        }

        return ptc;
}

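/* Release a page table, either returning it to its size-specific cache
 * or freeing its backing memory.
 */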
void
nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
        struct nvkm_mmu_pt *pt = *ppt;
        if (pt) {
                /* Handle sub-allocated page tables. */
                if (pt->sub) {
                        mutex_lock(&mmu->ptp.mutex);
                        nvkm_mmu_ptp_put(mmu, force, pt);
                        mutex_unlock(&mmu->ptp.mutex);
                        return;
                }

                /* Either cache or free the object. */
                mutex_lock(&mmu->ptc.mutex);
                if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
                        list_add_tail(&pt->head, &pt->ptc->item);
                        pt->ptc->refs++;
                } else {
                        nvkm_memory_unref(&pt->memory);
                        kfree(pt);
                }
                mutex_unlock(&mmu->ptc.mutex);
        }
}

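/* Allocate a page table of the given size and alignment, preferring a
 * cached one.  Alignments below 0x1000 are sub-allocated from a parent
 * allocation instead.
 */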
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
        struct nvkm_mmu_ptc *ptc;
        struct nvkm_mmu_pt *pt;
        int ret;

        /* Sub-allocated page table (ie. GP100 LPT). */
        if (align < 0x1000) {
                mutex_lock(&mmu->ptp.mutex);
                pt = nvkm_mmu_ptp_get(mmu, align, zero);
                mutex_unlock(&mmu->ptp.mutex);
                return pt;
        }

        /* Lookup cache for this page table size. */
        mutex_lock(&mmu->ptc.mutex);
        ptc = nvkm_mmu_ptc_find(mmu, size);
        if (!ptc) {
                mutex_unlock(&mmu->ptc.mutex);
                return NULL;
        }

        /* If there's a free PT in the cache, reuse it. */
        pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
        if (pt) {
                if (zero)
                        nvkm_fo64(pt->memory, 0, 0, size >> 3);
                list_del(&pt->head);
                ptc->refs--;
                mutex_unlock(&mmu->ptc.mutex);
                return pt;
        }
        mutex_unlock(&mmu->ptc.mutex);

        /* No such luck, we need to allocate. */
        if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
                return NULL;
        pt->ptc = ptc;
        pt->sub = false;

        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              size, align, zero, &pt->memory);
        if (ret) {
                kfree(pt);
                return NULL;
        }

        pt->base = 0;
        pt->addr = nvkm_memory_addr(pt->memory);
        return pt;
}

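/* Drop every cached page table, releasing its backing memory. */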
void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
        struct nvkm_mmu_ptc *ptc;
        list_for_each_entry(ptc, &mmu->ptc.list, head) {
                struct nvkm_mmu_pt *pt, *tt;
                list_for_each_entry_safe(pt, tt, &ptc->item, head) {
                        nvkm_memory_unref(&pt->memory);
                        list_del(&pt->head);
                        kfree(pt);
                }
        }
}

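/* Destroy the cache buckets themselves; they're expected to be empty. */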
static void
nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
        struct nvkm_mmu_ptc *ptc, *ptct;

        list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
                WARN_ON(!list_empty(&ptc->item));
                list_del(&ptc->head);
                kfree(ptc);
        }
}

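/* Initialise the page table cache and sub-allocation lists and locks. */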
static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
        mutex_init(&mmu->ptc.mutex);
        INIT_LIST_HEAD(&mmu->ptc.list);
        mutex_init(&mmu->ptp.mutex);
        INIT_LIST_HEAD(&mmu->ptp.list);
}

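/* Register a memory type, combining the given attributes with those of
 * the heap it belongs to.
 */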
static void
nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
{
        if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
                mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
                mmu->type[mmu->type_nr].heap = heap;
                mmu->type_nr++;
        }
}

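/* Register a memory heap with the given attributes and size, returning
 * its index, or -EINVAL if the size is zero or the table is full.
 */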
static int
nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
{
        if (size) {
                if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
                        mmu->heap[mmu->heap_nr].type = type;
                        mmu->heap[mmu->heap_nr].size = size;
                        return mmu->heap_nr++;
                }
        }
        return -EINVAL;
}

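/* Register the heap and memory types available for system memory. */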
static void
nvkm_mmu_host(struct nvkm_mmu *mmu)
{
        struct nvkm_device *device = mmu->subdev.device;
        u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
        int heap;

        /* Non-mappable system memory. */
        heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
        nvkm_mmu_type(mmu, heap, type);

        /* Non-coherent, cached, system memory.
         *
         * Block-linear mappings of system memory must be done through
         * BAR1, and cannot be supported on systems where we're unable
         * to map BAR1 with write-combining.
         */
        type |= NVKM_MEM_MAPPABLE;
        if (!device->bar || device->bar->iomap_uncached)
                nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
        else
                nvkm_mmu_type(mmu, heap, type);

        /* Coherent, cached, system memory.
         *
         * Unsupported on systems that aren't able to support snooped
         * mappings, and also for block-linear mappings which must be
         * done through BAR1.
         */
        type |= NVKM_MEM_COHERENT;
        if (device->func->cpu_coherent)
                nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);

        /* Uncached system memory. */
        nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
}

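/* Register the heaps and memory types available for VRAM, interleaving
 * the host memory types in order of preference.
 */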
static void
nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
        struct nvkm_device *device = mmu->subdev.device;
        struct nvkm_mm *mm = &device->fb->ram->vram;
        const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
        const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
        const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
        u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
        u8 heap = NVKM_MEM_VRAM;
        int heapM, heapN, heapU;

        /* Mixed-memory doesn't support compression or display. */
        heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);

        heap |= NVKM_MEM_COMP;
        heap |= NVKM_MEM_DISP;
        heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
        heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);

        /* Add non-mappable VRAM types first so that they're preferred
         * over anything else.  Mixed-memory will be slower than other
         * heaps, so it's prioritised last.
         */
        nvkm_mmu_type(mmu, heapU, type);
        nvkm_mmu_type(mmu, heapN, type);
        nvkm_mmu_type(mmu, heapM, type);

        /* Add host memory types next, under the assumption that users
         * wanting mappable memory want to use them as staging buffers
         * or the like.
         */
        nvkm_mmu_host(mmu);

        /* Mappable VRAM types go last, as they're basically the worst
         * possible type to ask for unless there's no other choice.
         */
        if (device->bar) {
                /* Write-combined BAR1 access. */
                type |= NVKM_MEM_MAPPABLE;
                if (!device->bar->iomap_uncached) {
                        nvkm_mmu_type(mmu, heapN, type);
                        nvkm_mmu_type(mmu, heapM, type);
                }

                /* Uncached BAR1 access. */
                type |= NVKM_MEM_COHERENT;
                type |= NVKM_MEM_UNCACHED;
                nvkm_mmu_type(mmu, heapN, type);
                nvkm_mmu_type(mmu, heapM, type);
        }
}

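/* One-time setup: probe the available memory types and, for backends
 * with a global address space, create the "gart" VMM.
 */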
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);

        /* Determine available memory types. */
        if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
                nvkm_mmu_vram(mmu);
        else
                nvkm_mmu_host(mmu);

        if (mmu->func->vmm.global) {
                int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
                                       "gart", &mmu->vmm);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);
        if (mmu->func->init)
                mmu->func->init(mmu);
        return 0;
}

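/* Destructor: drop the global VMM and tear down the page table caches. */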
static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);

        nvkm_vmm_unref(&mmu->vmm);

        nvkm_mmu_ptc_fini(mmu);
        mutex_destroy(&mmu->mutex);
        return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
        .dtor = nvkm_mmu_dtor,
        .oneinit = nvkm_mmu_oneinit,
        .init = nvkm_mmu_init,
};

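/* Construct an MMU subdevice in caller-provided storage. */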
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
              enum nvkm_subdev_type type, int inst, struct nvkm_mmu *mmu)
{
        nvkm_subdev_ctor(&nvkm_mmu, device, type, inst, &mmu->subdev);
        mmu->func = func;
        mmu->dma_bits = func->dma_bits;
        nvkm_mmu_ptc_init(mmu);
        mutex_init(&mmu->mutex);
        mmu->user.ctor = nvkm_ummu_new;
        mmu->user.base = func->mmu.user;
}

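/* Allocate and construct an MMU subdevice. */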
int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
              enum nvkm_subdev_type type, int inst, struct nvkm_mmu **pmmu)
{
        if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_mmu_ctor(func, device, type, inst, *pmmu);
        return 0;
}