linux/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

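/*
 * GPU virtual memory management: allocates and refcounts the page
 * tables backing a VM, maps VRAM region lists, DMA page arrays and
 * scatterlists into its address space, and keeps every linked page
 * directory in sync with the VM's page tables.
 */
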
#include <core/gpuobj.h>
#include <core/mm.h>

#include <subdev/fb.h>
#include <subdev/vm.h>

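/* Map the physical region list of @node into @vma's virtual range,
 * starting @delta bytes into it.  Each vmm->map() call programs at
 * most the remainder of one page table, stepping to the next PDE at
 * page table boundaries; the TLB is flushed once at the end.
 */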
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        struct nouveau_mm_node *r;
        int big = vma->node->type != vmm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vmm->pgt_bits - bits);
        u32 end, len;

        /* from here on, delta is reused to count the bytes mapped so
         * far, and is passed through to the backend's map() each call
         */
        delta = 0;
        list_for_each_entry(r, &node->regions, rl_entry) {
                u64 phys = (u64)r->offset << 12;
                u32 num  = r->length >> bits;

                while (num) {
                        struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                        end = (pte + num);
                        if (unlikely(end >= max))
                                end = max;
                        len = end - pte;

                        vmm->map(vma, pgt, node, pte, len, phys, delta);

                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
                                phys += len << (bits + 12);
                                pde++;
                                pte = 0;
                        }

                        delta += (u64)len << vma->node->type;
                }
        }

        vmm->flush(vm);
}

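/* As nouveau_vm_map_at(), but for memory described by a scatterlist
 * (mem->sg).  Pages are mapped one at a time, since a single sg entry
 * may cross a page table boundary.
 */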
static void
nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
                        struct nouveau_mem *mem)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        int big = vma->node->type != vmm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vmm->pgt_bits - bits);
        unsigned m, sglen;
        u32 end, len;
        int i;
        struct scatterlist *sg;

        for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
                sglen = sg_dma_len(sg) >> PAGE_SHIFT;

                end = pte + sglen;
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                for (m = 0; m < len; m++) {
                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                        vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
                        num--;
                        pte++;

                        if (num == 0)
                                goto finish;
                }
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                        /* crossed a page table boundary: refresh pgt so
                         * the rest of this sg entry lands in the next
                         * page table instead of rewriting the old one
                         */
                        pgt = vm->pgt[pde].obj[big];
                }
                if (m < sglen) {
                        for (; m < sglen; m++) {
                                dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                                vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
                                num--;
                                pte++;
                                if (num == 0)
                                        goto finish;
                        }
                }

        }
finish:
        vmm->flush(vm);
}

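/* As nouveau_vm_map_at(), but for memory described by a flat array of
 * DMA addresses (mem->pages), mapping up to a page table's worth of
 * pages per vmm->map_sg() call.
 */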
static void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
                  struct nouveau_mem *mem)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        dma_addr_t *list = mem->pages;
        int big = vma->node->type != vmm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vmm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vmm->map_sg(vma, pgt, mem, pte, len, list);

                num  -= len;
                pte  += len;
                list += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vmm->flush(vm);
}

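/* Map @node at the start of @vma, picking the mapping path from how
 * the memory is described: scatterlist, DMA page array, or contiguous
 * region list.
 */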
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
        if (node->sg)
                nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
        else
        if (node->pages)
                nouveau_vm_map_sg(vma, 0, node->size << 12, node);
        else
                nouveau_vm_map_at(vma, 0, node);
}

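/* Clear the PTEs covering @length bytes of @vma's virtual range,
 * starting @delta bytes into it, then flush the TLB.
 */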
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        int big = vma->node->type != vmm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vmm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vmm->unmap(pgt, pte, len);

                num -= len;
                pte += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vmm->flush(vm);
}

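/* Unmap the entire virtual range backing @vma. */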
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
        nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

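/* Drop a reference on the page tables backing PDEs @fpde..@lpde,
 * releasing any that reach zero and clearing their entries in every
 * linked page directory.  Called with the vmmgr mutex held; the mutex
 * is dropped around the final gpuobj release.
 */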
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
        struct nouveau_vmmgr *vmm = vm->vmm;
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_vm_pgt *vpgt;
        struct nouveau_gpuobj *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
                vpgt = &vm->pgt[pde - vm->fpde];
                if (--vpgt->refcount[big])
                        continue;

                pgt = vpgt->obj[big];
                vpgt->obj[big] = NULL;

                list_for_each_entry(vpgd, &vm->pgd_list, head) {
                        vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
                }

                mutex_unlock(&nv_subdev(vmm)->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&nv_subdev(vmm)->mutex);
        }
}

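/* Allocate the page table backing PDE @pde for page size (1 << @type)
 * and point every linked page directory at it.  Called with the vmmgr
 * mutex held; the mutex is dropped around the allocation, so another
 * thread may fill the PDE first, in which case the freshly allocated
 * object is released and the existing table gains the reference.
 */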
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
        struct nouveau_vmmgr *vmm = vm->vmm;
        struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_gpuobj *pgt;
        int big = (type != vmm->spg_shift);
        u32 pgt_size;
        int ret;

        pgt_size  = (1 << (vmm->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        mutex_unlock(&nv_subdev(vmm)->mutex);
        ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
        mutex_lock(&nv_subdev(vmm)->mutex);
        if (unlikely(ret))
                return ret;

        /* someone beat us to filling the PDE while we didn't have the lock */
        if (unlikely(vpgt->refcount[big]++)) {
                mutex_unlock(&nv_subdev(vmm)->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&nv_subdev(vmm)->mutex);
                return 0;
        }

        vpgt->obj[big] = pgt;
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
        }

        return 0;
}

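/* Allocate @size bytes of virtual address space from @vm into @vma,
 * using (1 << @page_shift) byte pages, and take a reference on every
 * page table covering the range, allocating those that don't yet
 * exist.
 */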
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
               u32 access, struct nouveau_vma *vma)
{
        struct nouveau_vmmgr *vmm = vm->vmm;
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&nv_subdev(vmm)->mutex);
        ret = nouveau_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
                             &vma->node);
        if (unlikely(ret != 0)) {
                mutex_unlock(&nv_subdev(vmm)->mutex);
                return ret;
        }

        fpde = (vma->node->offset >> vmm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

        for (pde = fpde; pde <= lpde; pde++) {
                struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
                int big = (vma->node->type != vmm->spg_shift);

                if (likely(vpgt->refcount[big])) {
                        vpgt->refcount[big]++;
                        continue;
                }

                ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
                if (ret) {
                        if (pde != fpde)
                                nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
                        nouveau_mm_free(&vm->mm, &vma->node);
                        mutex_unlock(&nv_subdev(vmm)->mutex);
                        return ret;
                }
        }
        mutex_unlock(&nv_subdev(vmm)->mutex);

        vma->vm = NULL;
        nouveau_vm_ref(vm, &vma->vm, NULL);
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
}

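/* Release @vma's virtual address range, dropping the page table
 * references taken by nouveau_vm_get() and the reference on the VM
 * itself.
 */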
void
nouveau_vm_put(struct nouveau_vma *vma)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_vmmgr *vmm = vm->vmm;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
                return;
        fpde = (vma->node->offset >> vmm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

        mutex_lock(&nv_subdev(vmm)->mutex);
        nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
        nouveau_mm_free(&vm->mm, &vma->node);
        mutex_unlock(&nv_subdev(vmm)->mutex);

        nouveau_vm_ref(NULL, &vma->vm, NULL);
}

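/* Create a VM spanning @offset..@offset+@length, with allocations
 * served from @mm_offset onwards in @block byte granularity.  Only
 * the page table pointer array is allocated here; the tables
 * themselves are created on demand by nouveau_vm_map_pgt().
 */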
int
nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
                  u64 mm_offset, u32 block, struct nouveau_vm **pvm)
{
        struct nouveau_vm *vm;
        u64 mm_length = (offset + length) - mm_offset;
        int ret;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        INIT_LIST_HEAD(&vm->pgd_list);
        vm->vmm = vmm;
        kref_init(&vm->refcount);
        vm->fpde = offset >> (vmm->pgt_bits + 12);
        vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);

        vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
        if (!vm->pgt) {
                kfree(vm);
                return -ENOMEM;
        }

        ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
                              block >> 12);
        if (ret) {
                vfree(vm->pgt);
                kfree(vm);
                return ret;
        }

        *pvm = vm;

        return 0;
}

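/* Create a VM through the device's vmmgr backend. */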
int
nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
               u64 mm_offset, struct nouveau_vm **pvm)
{
        struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
        return vmm->create(vmm, offset, length, mm_offset, pvm);
}

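/* Link page directory @pgd into @vm: reference it, fill it with the
 * VM's current page tables, and add it to the pgd list so later page
 * table changes reach it too.
 */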
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vmmgr *vmm = vm->vmm;
        struct nouveau_vm_pgd *vpgd;
        int i;

        if (!pgd)
                return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
                return -ENOMEM;

        nouveau_gpuobj_ref(pgd, &vpgd->obj);

        mutex_lock(&nv_subdev(vmm)->mutex);
        for (i = vm->fpde; i <= vm->lpde; i++)
                vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&nv_subdev(vmm)->mutex);
        return 0;
}

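/* Remove page directory @mpgd from @vm's pgd list and drop the
 * reference taken by nouveau_vm_link().
 */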
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
        struct nouveau_vmmgr *vmm = vm->vmm;
        struct nouveau_vm_pgd *vpgd, *tmp;
        struct nouveau_gpuobj *pgd = NULL;

        if (!mpgd)
                return;

        mutex_lock(&nv_subdev(vmm)->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj == mpgd) {
                        pgd = vpgd->obj;
                        list_del(&vpgd->head);
                        kfree(vpgd);
                        break;
                }
        }
        mutex_unlock(&nv_subdev(vmm)->mutex);

        nouveau_gpuobj_ref(NULL, &pgd);
}

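/* Last reference to the VM is gone: unlink any remaining page
 * directories and free the allocator and page table array.
 */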
static void
nouveau_vm_del(struct kref *kref)
{
        struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount);
        struct nouveau_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                nouveau_vm_unlink(vm, vpgd->obj);
        }

        nouveau_mm_fini(&vm->mm);
        vfree(vm->pgt);
        kfree(vm);
}

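/* Point *@ptr at @ref, adjusting refcounts, and link/unlink @pgd on
 * the new/old VM respectively.  Pass @ref == NULL to simply drop the
 * existing reference.
 */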
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
               struct nouveau_gpuobj *pgd)
{
        if (ref) {
                int ret = nouveau_vm_link(ref, pgd);
                if (ret)
                        return ret;

                kref_get(&ref->refcount);
        }

        if (*ptr) {
                nouveau_vm_unlink(*ptr, pgd);
                kref_put(&(*ptr)->refcount, nouveau_vm_del);
        }

        *ptr = ref;
        return 0;
}