linux/drivers/gpu/drm/nouveau/nouveau_svm.c
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"

#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>

#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>

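/* Per-device SVM state: the owning nouveau_drm, a list of channel instances
 * (nouveau_ivmm) that have been joined to an SVMM, and a software cache of
 * the replayable fault buffer.  Fault entries read from the HW ring are
 * parsed into the ->fault[] array before being sorted and handled.
 */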
struct nouveau_svm {
        struct nouveau_drm *drm;
        struct mutex mutex;
        struct list_head inst;

        struct nouveau_svm_fault_buffer {
                int id;
                struct nvif_object object;
                u32 entries;
                u32 getaddr;
                u32 putaddr;
                u32 get;
                u32 put;
                struct nvif_notify notify;

                struct nouveau_svm_fault {
                        u64 inst;
                        u64 addr;
                        u64 time;
                        u32 engine;
                        u8  gpc;
                        u8  hub;
                        u8  access;
                        u8  client;
                        u8  fault;
                        struct nouveau_svmm *svmm;
                } **fault;
                int fault_nr;
        } buffer[1];
};

#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)

struct nouveau_pfnmap_args {
        struct nvif_ioctl_v0 i;
        struct nvif_ioctl_mthd_v0 m;
        struct nvif_vmm_pfnmap_v0 p;
};

struct nouveau_ivmm {
        struct nouveau_svmm *svmm;
        u64 inst;
        struct list_head head;
};

static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        list_for_each_entry(ivmm, &svm->inst, head) {
                if (ivmm->inst == inst)
                        return ivmm;
        }
        return NULL;
}

#define SVMM_DBG(s,f,a...)                                                     \
        NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...)                                                     \
        NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)

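/* Handler for the NOUVEAU_SVM_BIND ioctl: decode the command/priority/target
 * fields from args->header, then walk the VMAs intersecting the requested VA
 * range and migrate them to GPU VRAM on a best-effort basis.  Only the
 * MIGRATE command and the GPU_VRAM target are accepted for now.
 */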
int
nouveau_svmm_bind(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_svm_bind *args = data;
        unsigned target, cmd, priority;
        unsigned long addr, end;
        struct mm_struct *mm;

        args->va_start &= PAGE_MASK;
        args->va_end = ALIGN(args->va_end, PAGE_SIZE);

        /* Sanity check arguments */
        if (args->reserved0 || args->reserved1)
                return -EINVAL;
        if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
                return -EINVAL;
        if (args->va_start >= args->va_end)
                return -EINVAL;

        cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
        cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
        switch (cmd) {
        case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
                break;
        default:
                return -EINVAL;
        }

        priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
        priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;

        /* FIXME: support CPU targets, i.e. all target values < GPU_VRAM */
        target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
        target &= NOUVEAU_SVM_BIND_TARGET_MASK;
        switch (target) {
        case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
                break;
        default:
                return -EINVAL;
        }

        /*
         * FIXME: for now, refuse a non-zero stride; the migrate kernel
         * function needs to be taught to handle strides, to avoid creating
         * a mess in each device driver.
         */
        if (args->stride)
                return -EINVAL;

        /*
         * OK, we are being asked to do something sane.  For now we only
         * support migrate commands, but things like memory policy (what to
         * do on page fault) and possibly other commands will be added later.
         */

        mm = get_task_mm(current);
        mmap_read_lock(mm);

        if (!cli->svm.svmm) {
                mmap_read_unlock(mm);
                mmput(mm);
                return -EINVAL;
        }

        for (addr = args->va_start, end = args->va_end; addr < end;) {
                struct vm_area_struct *vma;
                unsigned long next;

                vma = find_vma_intersection(mm, addr, end);
                if (!vma)
                        break;

                addr = max(addr, vma->vm_start);
                next = min(vma->vm_end, end);
                /* This is a best effort so we ignore errors */
                nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
                                         next);
                addr = next;
        }

        /*
         * FIXME: return the number of pages we have migrated; the migrate
         * API needs updating to return that information so that it can be
         * reported to user space.
         */
        args->result = 0;

        mmap_read_unlock(mm);
        mmput(mm);

        return 0;
}

/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        if (svmm) {
                mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
                ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
                if (ivmm) {
                        list_del(&ivmm->head);
                        kfree(ivmm);
                }
                mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
        }
}

/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        if (svmm) {
                if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
                        return -ENOMEM;
                ivmm->svmm = svmm;
                ivmm->inst = inst;

                mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
                list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
                mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
        }
        return 0;
}

/* Invalidate SVMM address-range on GPU. */
void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
        if (limit > start) {
                bool super = svmm->vmm->vmm.object.client->super;
                svmm->vmm->vmm.object.client->super = true;
                nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
                                 &(struct nvif_vmm_pfnclr_v0) {
                                        .addr = start,
                                        .size = limit - start,
                                 }, sizeof(struct nvif_vmm_pfnclr_v0));
                svmm->vmm->vmm.object.client->super = super;
        }
}

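/* MMU notifier callback: mirror CPU-side invalidations into the GPU VMM,
 * ignoring migration-triggered invalidations for our own device-private
 * pages and splitting the range around the unmanaged window reserved at
 * nouveau_svmm_init() time.
 */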
static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
                                    const struct mmu_notifier_range *update)
{
        struct nouveau_svmm *svmm =
                container_of(mn, struct nouveau_svmm, notifier);
        unsigned long start = update->start;
        unsigned long limit = update->end;

        if (!mmu_notifier_range_blockable(update))
                return -EAGAIN;

        SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

        mutex_lock(&svmm->mutex);
        if (unlikely(!svmm->vmm))
                goto out;

        /*
         * Ignore invalidation callbacks for device private pages since
         * the invalidation is handled as part of the migration process.
         */
        if (update->event == MMU_NOTIFY_MIGRATE &&
            update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev)
                goto out;

        if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
                if (start < svmm->unmanaged.start) {
                        nouveau_svmm_invalidate(svmm, start,
                                                svmm->unmanaged.limit);
                }
                start = svmm->unmanaged.limit;
        }

        nouveau_svmm_invalidate(svmm, start, limit);

out:
        mutex_unlock(&svmm->mutex);
        return 0;
}

static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
        kfree(container_of(mn, struct nouveau_svmm, notifier));
}

static const struct mmu_notifier_ops nouveau_mn_ops = {
        .invalidate_range_start = nouveau_svmm_invalidate_range_start,
        .free_notifier = nouveau_svmm_free_notifier,
};

void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
        struct nouveau_svmm *svmm = *psvmm;
        if (svmm) {
                mutex_lock(&svmm->mutex);
                svmm->vmm = NULL;
                mutex_unlock(&svmm->mutex);
                mmu_notifier_put(&svmm->notifier);
                *psvmm = NULL;
        }
}

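/* Handler for the NOUVEAU_SVM_INIT ioctl: allocate the SVMM tracking
 * structure, replace the client's VMM with one that has replayable faults
 * enabled, and register an mmu_notifier on the current process so CPU-side
 * invalidations are reflected on the GPU.  Ownership of the svmm passes to
 * the notifier once it is registered.
 */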
int
nouveau_svmm_init(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_svmm *svmm;
        struct drm_nouveau_svm_init *args = data;
        int ret;

        /* Allocate tracking for SVM-enabled VMM. */
        if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
                return -ENOMEM;
        svmm->vmm = &cli->svm;
        svmm->unmanaged.start = args->unmanaged_addr;
        svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
        mutex_init(&svmm->mutex);

        /* Check that SVM isn't already enabled for the client. */
        mutex_lock(&cli->mutex);
        if (cli->svm.cli) {
                ret = -EBUSY;
                goto out_free;
        }

        /* Allocate a new GPU VMM that can support SVM (managed by the
         * client, with replayable faults enabled).
         *
         * All future channel/memory allocations will make use of this
         * VMM instead of the standard one.
         */
        ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
                            cli->vmm.vmm.object.oclass, true,
                            args->unmanaged_addr, args->unmanaged_size,
                            &(struct gp100_vmm_v0) {
                                .fault_replay = true,
                            }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
        if (ret)
                goto out_free;

        mmap_write_lock(current->mm);
        svmm->notifier.ops = &nouveau_mn_ops;
        ret = __mmu_notifier_register(&svmm->notifier, current->mm);
        if (ret)
                goto out_mm_unlock;
        /* Note, ownership of svmm transfers to mmu_notifier */

        cli->svm.svmm = svmm;
        cli->svm.cli = cli;
        mmap_write_unlock(current->mm);
        mutex_unlock(&cli->mutex);
        return 0;

out_mm_unlock:
        mmap_write_unlock(current->mm);
out_free:
        mutex_unlock(&cli->mutex);
        kfree(svmm);
        return ret;
}

/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
        SVM_DBG(svm, "replay");
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_REPLAY,
                                 &(struct gp100_vmm_fault_replay_vn) {},
                                 sizeof(struct gp100_vmm_fault_replay_vn)));
}

/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (ie. GPU SIGSEGV).
 */
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
                         u64 inst, u8 hub, u8 gpc, u8 client)
{
        SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_CANCEL,
                                 &(struct gp100_vmm_fault_cancel_v0) {
                                        .hub = hub,
                                        .gpc = gpc,
                                        .client = client,
                                        .inst = inst,
                                 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
}

static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
                               struct nouveau_svm_fault *fault)
{
        nouveau_svm_fault_cancel(svm, fault->inst,
                                      fault->hub,
                                      fault->gpc,
                                      fault->client);
}

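/* Sort comparator for cached faults: order by channel instance, then by
 * address, and finally by access type so that WRITE faults sort before
 * READ/PREFETCH faults to the same page.
 */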
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
        const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
        const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
        int ret;
        if ((ret = (s64)fa->inst - fb->inst))
                return ret;
        if ((ret = (s64)fa->addr - fb->addr))
                return ret;
        /*XXX: atomic? */
        return (fa->access == 0 || fa->access == 3) -
               (fb->access == 0 || fb->access == 3);
}

static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
                        struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
        struct nvif_object *memory = &buffer->object;
        const u32 instlo = nvif_rd32(memory, offset + 0x00);
        const u32 insthi = nvif_rd32(memory, offset + 0x04);
        const u32 addrlo = nvif_rd32(memory, offset + 0x08);
        const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
        const u32 timelo = nvif_rd32(memory, offset + 0x10);
        const u32 timehi = nvif_rd32(memory, offset + 0x14);
        const u32 engine = nvif_rd32(memory, offset + 0x18);
        const u32   info = nvif_rd32(memory, offset + 0x1c);
        const u64   inst = (u64)insthi << 32 | instlo;
        const u8     gpc = (info & 0x1f000000) >> 24;
        const u8     hub = (info & 0x00100000) >> 20;
        const u8  client = (info & 0x00007f00) >> 8;
        struct nouveau_svm_fault *fault;

        /* XXX: we are probably supposed to spin here waiting for the valid bit. */
        if (WARN_ON(!(info & 0x80000000)))
                return;

        nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

        if (!buffer->fault[buffer->fault_nr]) {
                fault = kmalloc(sizeof(*fault), GFP_KERNEL);
                if (WARN_ON(!fault)) {
                        nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
                        return;
                }
                buffer->fault[buffer->fault_nr] = fault;
        }

        fault = buffer->fault[buffer->fault_nr++];
        fault->inst   = inst;
        fault->addr   = (u64)addrhi << 32 | addrlo;
        fault->time   = (u64)timehi << 32 | timelo;
        fault->engine = engine;
        fault->gpc    = gpc;
        fault->hub    = hub;
        fault->access = (info & 0x000f0000) >> 16;
        fault->client = client;
        fault->fault  = (info & 0x0000001f);

        SVM_DBG(svm, "fault %016llx %016llx %02x",
                fault->inst, fault->addr, fault->access);
}

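/* Per-fault interval notifier: pairs a short-lived mmu_interval_notifier
 * with the SVMM whose mutex serializes invalidations against GPU page-table
 * updates.
 */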
struct svm_notifier {
        struct mmu_interval_notifier notifier;
        struct nouveau_svmm *svmm;
};

static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
                                         const struct mmu_notifier_range *range,
                                         unsigned long cur_seq)
{
        struct svm_notifier *sn =
                container_of(mni, struct svm_notifier, notifier);

        /*
         * This serializes the update to mni->invalidate_seq done by the
         * caller and prevents invalidation of the PTE from progressing while
         * HW is being programmed.  This is very hacky and only works because
         * the normal notifier that does invalidation is always called after
         * the range notifier.
         */
        if (mmu_notifier_range_blockable(range))
                mutex_lock(&sn->svmm->mutex);
        else if (!mutex_trylock(&sn->svmm->mutex))
                return false;
        mmu_interval_set_seq(mni, cur_seq);
        mutex_unlock(&sn->svmm->mutex);
        return true;
}

static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
        .invalidate = nouveau_svm_range_invalidate,
};

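/* Translate the hmm_pfn for the faulting page into an NVIF PFNMAP entry:
 * pick VRAM vs HOST depending on whether the page is device-private, fold
 * in the write permission, and upgrade to a larger page size when the CPU
 * maps the page at a higher map order.
 */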
static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
                                    struct hmm_range *range,
                                    struct nouveau_pfnmap_args *args)
{
        struct page *page;

        /*
         * The address prepared here is passed through nvif_object_ioctl()
         * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
         *
         * This is all just encoding the internal hmm representation into a
         * different nouveau internal representation.
         */
        if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
                args->p.phys[0] = 0;
                return;
        }

        page = hmm_pfn_to_page(range->hmm_pfns[0]);
        /*
         * Only map compound pages to the GPU if the CPU is also mapping the
         * page as a compound page. Otherwise, the PTE protections might not be
         * consistent (e.g., CPU only maps part of a compound page).
         * Note that the underlying page might still be larger than the
         * CPU mapping (e.g., a PUD sized compound page partially mapped with
         * a PMD sized page table entry).
         */
        if (hmm_pfn_to_map_order(range->hmm_pfns[0])) {
                unsigned long addr = args->p.addr;

                args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
                                PAGE_SHIFT;
                args->p.size = 1UL << args->p.page;
                args->p.addr &= ~(args->p.size - 1);
                page -= (addr - args->p.addr) >> PAGE_SHIFT;
        }
        if (is_device_private_page(page))
                args->p.phys[0] = nouveau_dmem_page_addr(page) |
                                NVIF_VMM_PFNMAP_V0_V |
                                NVIF_VMM_PFNMAP_V0_VRAM;
        else
                args->p.phys[0] = page_to_phys(page) |
                                NVIF_VMM_PFNMAP_V0_V |
                                NVIF_VMM_PFNMAP_V0_HOST;
        if (range->hmm_pfns[0] & HMM_PFN_WRITE)
                args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}

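/* Fault a single page with hmm_range_fault() and program the resulting PFN
 * into the GPU page tables.  The mmu_interval_read_begin()/_retry() pair,
 * taken under svmm->mutex, ensures the mapping is not stale if the CPU
 * invalidates concurrently; the loop gives up after a timeout.
 */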
static int nouveau_range_fault(struct nouveau_svmm *svmm,
                               struct nouveau_drm *drm,
                               struct nouveau_pfnmap_args *args, u32 size,
                               unsigned long hmm_flags,
                               struct svm_notifier *notifier)
{
        unsigned long timeout =
                jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
        /* Have HMM fault pages within the fault window to the GPU. */
        unsigned long hmm_pfns[1];
        struct hmm_range range = {
                .notifier = &notifier->notifier,
                .start = notifier->notifier.interval_tree.start,
                .end = notifier->notifier.interval_tree.last + 1,
                .default_flags = hmm_flags,
                .hmm_pfns = hmm_pfns,
                .dev_private_owner = drm->dev,
        };
        struct mm_struct *mm = notifier->notifier.mm;
        int ret;

        while (true) {
                if (time_after(jiffies, timeout))
                        return -EBUSY;

                range.notifier_seq = mmu_interval_read_begin(range.notifier);
                mmap_read_lock(mm);
                ret = hmm_range_fault(&range);
                mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;
                        return ret;
                }

                mutex_lock(&svmm->mutex);
                if (mmu_interval_read_retry(range.notifier,
                                            range.notifier_seq)) {
                        mutex_unlock(&svmm->mutex);
                        continue;
                }
                break;
        }

        nouveau_hmm_convert_pfn(drm, &range, args);

        svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
        svmm->vmm->vmm.object.client->super = false;
        mutex_unlock(&svmm->mutex);

        return ret;
}

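/* Top-level replayable fault handler, run from the nvif notify callback:
 * drain the HW fault buffer into the software cache, sort the entries,
 * resolve each instance pointer to its SVMM, service the faults page by
 * page, and finally ask the GPU to replay (or cancel) the faulted accesses.
 */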
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
        struct nouveau_svm_fault_buffer *buffer =
                container_of(notify, typeof(*buffer), notify);
        struct nouveau_svm *svm =
                container_of(buffer, typeof(*svm), buffer[buffer->id]);
        struct nvif_object *device = &svm->drm->client.device.object;
        struct nouveau_svmm *svmm;
        struct {
                struct nouveau_pfnmap_args i;
                u64 phys[1];
        } args;
        unsigned long hmm_flags;
        u64 inst, start, limit;
        int fi, fn;
        int replay = 0, ret;

        /* Parse available fault buffer entries into a cache, and update
         * the GET pointer so HW can reuse the entries.
         */
        SVM_DBG(svm, "fault handler");
        if (buffer->get == buffer->put) {
                buffer->put = nvif_rd32(device, buffer->putaddr);
                buffer->get = nvif_rd32(device, buffer->getaddr);
                if (buffer->get == buffer->put)
                        return NVIF_NOTIFY_KEEP;
        }
        buffer->fault_nr = 0;

        SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
        while (buffer->get != buffer->put) {
                nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
                if (++buffer->get == buffer->entries)
                        buffer->get = 0;
        }
        nvif_wr32(device, buffer->getaddr, buffer->get);
        SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);

        /* Sort parsed faults by instance pointer to prevent unnecessary
         * instance to SVMM translations, followed by address and access
         * type to reduce the amount of work when handling the faults.
         */
        sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
             nouveau_svm_fault_cmp, NULL);

        /* Lookup SVMM structure for each unique instance pointer. */
        mutex_lock(&svm->mutex);
        for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
                if (!svmm || buffer->fault[fi]->inst != inst) {
                        struct nouveau_ivmm *ivmm =
                                nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
                        svmm = ivmm ? ivmm->svmm : NULL;
                        inst = buffer->fault[fi]->inst;
                        SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
                }
                buffer->fault[fi]->svmm = svmm;
        }
        mutex_unlock(&svm->mutex);

        /* Process list of faults. */
        args.i.i.version = 0;
        args.i.i.type = NVIF_IOCTL_V0_MTHD;
        args.i.m.version = 0;
        args.i.m.method = NVIF_VMM_V0_PFNMAP;
        args.i.p.version = 0;

        for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
                struct svm_notifier notifier;
                struct mm_struct *mm;

                /* Cancel any faults from non-SVM channels. */
                if (!(svmm = buffer->fault[fi]->svmm)) {
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }
                SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

                /* We try and group handling of faults within a small
                 * window into a single update.
                 */
                start = buffer->fault[fi]->addr;
                limit = start + PAGE_SIZE;
                if (start < svmm->unmanaged.limit)
                        limit = min_t(u64, limit, svmm->unmanaged.start);

                /*
                 * Prepare the GPU-side update of all pages within the
                 * fault window, determining required pages and access
                 * permissions based on pending faults.
                 */
                args.i.p.addr = start;
                args.i.p.page = PAGE_SHIFT;
                args.i.p.size = PAGE_SIZE;
                /*
                 * Determine required permissions based on GPU fault
                 * access flags.
                 * XXX: atomic?
                 */
                switch (buffer->fault[fi]->access) {
                case 0: /* READ. */
                        hmm_flags = HMM_PFN_REQ_FAULT;
                        break;
                case 3: /* PREFETCH. */
                        hmm_flags = 0;
                        break;
                default:
                        hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
                        break;
                }

                mm = svmm->notifier.mm;
                if (!mmget_not_zero(mm)) {
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }

                notifier.svmm = svmm;
                ret = mmu_interval_notifier_insert(&notifier.notifier, mm,
                                                   args.i.p.addr, args.i.p.size,
                                                   &nouveau_svm_mni_ops);
                if (!ret) {
                        ret = nouveau_range_fault(svmm, svm->drm, &args.i,
                                sizeof(args), hmm_flags, &notifier);
                        mmu_interval_notifier_remove(&notifier.notifier);
                }
                mmput(mm);

                limit = args.i.p.addr + args.i.p.size;
                for (fn = fi; ++fn < buffer->fault_nr; ) {
                        /* It's okay to skip over duplicate addresses from the
                         * same SVMM as faults are ordered by access type such
                         * that only the first one needs to be handled.
                         *
                         * ie. WRITE faults appear first, thus any handling of
                         * pending READ faults will already be satisfied.
                         * But if a large page is mapped, make sure subsequent
                         * fault addresses have sufficient access permission.
                         */
                        if (buffer->fault[fn]->svmm != svmm ||
                            buffer->fault[fn]->addr >= limit ||
                            (buffer->fault[fi]->access == 0 /* READ. */ &&
                             !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
                            (buffer->fault[fi]->access != 0 /* READ. */ &&
                             buffer->fault[fi]->access != 3 /* PREFETCH. */ &&
                             !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)))
                                break;
                }

                /* If handling failed completely, cancel all faults. */
                if (ret) {
                        while (fi < fn) {
                                struct nouveau_svm_fault *fault =
                                        buffer->fault[fi++];

                                nouveau_svm_fault_cancel_fault(svm, fault);
                        }
                } else
                        replay++;
        }

        /* Issue fault replay to the GPU. */
        if (replay)
                nouveau_svm_fault_replay(svm);
        return NVIF_NOTIFY_KEEP;
}

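/* Helpers for pre-allocated PFNMAP ioctl arguments: callers are handed the
 * embedded p.phys[] array, and container_of() recovers the full argument
 * structure when the map is issued.
 */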
static struct nouveau_pfnmap_args *
nouveau_pfns_to_args(void *pfns)
{
        return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
}

u64 *
nouveau_pfns_alloc(unsigned long npages)
{
        struct nouveau_pfnmap_args *args;

        args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
        if (!args)
                return NULL;

        args->i.type = NVIF_IOCTL_V0_MTHD;
        args->m.method = NVIF_VMM_V0_PFNMAP;
        args->p.page = PAGE_SHIFT;

        return args->p.phys;
}

void
nouveau_pfns_free(u64 *pfns)
{
        struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);

        kfree(args);
}

void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
                 unsigned long addr, u64 *pfns, unsigned long npages)
{
        struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
        int ret;

        args->p.addr = addr;
        args->p.size = npages << PAGE_SHIFT;

        mutex_lock(&svmm->mutex);

        svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
                                npages * sizeof(args->p.phys[0]), NULL);
        svmm->vmm->vmm.object.client->super = false;

        mutex_unlock(&svmm->mutex);
}

static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        nvif_notify_put(&buffer->notify);
}

static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nvif_object *device = &svm->drm->client.device.object;
        buffer->get = nvif_rd32(device, buffer->getaddr);
        buffer->put = nvif_rd32(device, buffer->putaddr);
        SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
        return nvif_notify_get(&buffer->notify);
}

static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        int i;

        if (buffer->fault) {
                for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
                        kfree(buffer->fault[i]);
                kvfree(buffer->fault);
        }

        nouveau_svm_fault_buffer_fini(svm, id);

        nvif_notify_dtor(&buffer->notify);
        nvif_object_dtor(&buffer->object);
}

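/* Allocate and map the replayable fault buffer object, hook up the fault
 * notify handler, and allocate the array used to cache parsed fault entries.
 */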
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nouveau_drm *drm = svm->drm;
        struct nvif_object *device = &drm->client.device.object;
        struct nvif_clb069_v0 args = {};
        int ret;

        buffer->id = id;

        ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
                               sizeof(args), &buffer->object);
        if (ret < 0) {
                SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
                return ret;
        }

        nvif_object_map(&buffer->object, NULL, 0);
        buffer->entries = args.entries;
        buffer->getaddr = args.get;
        buffer->putaddr = args.put;

        ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
                               true, NVB069_V0_NTFY_FAULT, NULL, 0, 0,
                               &buffer->notify);
        if (ret)
                return ret;

        buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
        if (!buffer->fault)
                return -ENOMEM;

        return nouveau_svm_fault_buffer_init(svm, id);
}

void
nouveau_svm_resume(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm)
                nouveau_svm_fault_buffer_init(svm, 0);
}

void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm)
                nouveau_svm_fault_buffer_fini(svm, 0);
}

void
nouveau_svm_fini(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm) {
                nouveau_svm_fault_buffer_dtor(svm, 0);
                kfree(drm->svm);
                drm->svm = NULL;
        }
}

void
nouveau_svm_init(struct nouveau_drm *drm)
{
        static const struct nvif_mclass buffers[] = {
                {   VOLTA_FAULT_BUFFER_A, 0 },
                { MAXWELL_FAULT_BUFFER_A, 0 },
                {}
        };
        struct nouveau_svm *svm;
        int ret;

        /* Disable on Volta and newer until channel recovery is fixed,
         * otherwise clients will have a trivial way to trash the GPU
         * for everyone.
         */
        if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
                return;

        drm->svm->drm = drm;
        mutex_init(&drm->svm->mutex);
        INIT_LIST_HEAD(&drm->svm->inst);

        ret = nvif_mclass(&drm->client.device.object, buffers);
        if (ret < 0) {
                SVM_DBG(svm, "No supported fault buffer class");
                nouveau_svm_fini(drm);
                return;
        }

        ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
        if (ret) {
                nouveau_svm_fini(drm);
                return;
        }

        SVM_DBG(svm, "Initialised");
}