linux/arch/powerpc/kvm/book3s_64_vio.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

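/*
 * Number of host pages needed to store "iommu_pages" 64-bit TCEs.
 * For example (hypothetical numbers): a 2GB DMA window of 4K IOMMU
 * pages needs 512K TCEs, i.e. 4MB of table, i.e. 1024 host pages;
 * kvmppc_stt_pages() then adds the pages needed for the descriptor
 * itself plus its array of 1024 struct page pointers.
 */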
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

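/*
 * kvmppc_spapr_tce_iommu_table entries are unlinked under kref and
 * freed via call_rcu() so that lockless walkers of stt->iommu_tables
 * never see a freed entry.
 */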
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

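/*
 * Called when an IOMMU group is detached from the VM: drop the KVM
 * reference to every hardware table of that group from every LIOBN.
 */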
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                        }
                }
                cond_resched_rcu();
        }
        rcu_read_unlock();
}

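/*
 * Associate a hardware IOMMU table (one of the tables of "grp") with
 * the guest TCE table identified by "tablefd": find a hardware table
 * whose geometry is compatible with the guest view, then either bump
 * the reference on an existing kvmppc_spapr_tce_iommu_table entry or
 * link a new one.
 */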
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        rcu_read_lock();
        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /* Make sure hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                                (tbltmp->it_offset << tbltmp->it_page_shift ==
                                 stt->offset << stt->page_shift) &&
                                (tbltmp->it_size << tbltmp->it_page_shift >=
                                 stt->size << stt->page_shift)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        rcu_read_lock();
        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        rcu_read_unlock();
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM, we just increased
                 * its KVM reference counter and can return.
                 */
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
}

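/*
 * TCE table pages are allocated lazily: the first access (from a TCE
 * update or from the mmap fault handler below) allocates a zeroed page
 * under stt->alloc_lock.
 */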
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
                unsigned long sttpage)
{
        struct page *page = stt->pages[sttpage];

        if (page)
                return page;

        mutex_lock(&stt->alloc_lock);
        page = stt->pages[sttpage];
        if (!page) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                WARN_ON_ONCE(!page);
                if (page)
                        stt->pages[sttpage] = page;
        }
        mutex_unlock(&stt->alloc_lock);

        return page;
}

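/*
 * Userspace can mmap() the table fd to read the guest-visible TCEs;
 * faults map the lazily allocated backing pages into the user address
 * space.
 */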
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
        if (!page)
                return VM_FAULT_OOM;

        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        account_locked_vm(kvm->mm,
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

        kvm_put_kvm(stt->kvm);

        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap           = kvm_spapr_tce_mmap,
        .release        = kvm_spapr_tce_release,
};

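/*
 * Backs the KVM_CREATE_SPAPR_TCE_64 vm ioctl: allocates a TCE table
 * descriptor for a LIOBN and returns a file descriptor for it. A
 * minimal sketch of the userspace side (hypothetical LIOBN and window
 * geometry, error handling omitted):
 *
 *	struct kvm_create_spapr_tce_64 args = {
 *		.liobn = 0x80000000,
 *		.page_shift = 16,		// 64K IOMMU pages
 *		.offset = 0,			// window start, in pages
 *		.size = 32768,			// 32K entries == 2GB window
 *	};
 *	int tablefd = ioctl(vmfd, KVM_CREATE_SPAPR_TCE_64, &args);
 */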
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        struct mm_struct *mm = kvm->mm;
        unsigned long npages, size = args->size;
        int ret;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
                return -EINVAL;

        npages = kvmppc_tce_pages(size);
        ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        mutex_init(&stt->alloc_lock);
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        kvm_get_kvm(kvm);
        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0)
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
        else
                kvm_put_kvm_no_destroy(kvm);

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

        kfree(stt);
 fail_acct:
        account_locked_vm(mm, kvmppc_stt_pages(npages), false);
        return ret;
}

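/*
 * Translate the guest physical address carried in a TCE into a
 * userspace address. The low bits of a TCE are the TCE_PCI_READ and
 * TCE_PCI_WRITE permissions, the rest is the guest physical address;
 * e.g. a TCE of 0x40001003 means GPA 0x40001000, readable and
 * writable.
 */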
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        return 0;
}

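/*
 * Validate a guest TCE without side effects, so the actual update paths
 * (kvmppc_tce_put() and the hardware table maps) do not fail half-way.
 * H_TOO_HARD tells the caller to fall back, ultimately to the userspace
 * hcall handler.
 */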
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_TOO_HARD;

        if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
                return H_TOO_HARD;

        rcu_read_lock();
        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
                        rcu_read_unlock();
                        return H_TOO_HARD;
                }
        }
        rcu_read_unlock();

        return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;
        unsigned long sttpage;

        idx -= stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        page = stt->pages[sttpage];

        if (!page) {
                /* We allow any TCE, not just those with read|write permissions */
                if (!tce)
                        return;

                page = kvm_spapr_get_tce_page(stt, sttpage);
                if (!page)
                        return;
        }
        tbl = page_to_virt(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
}

static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                return H_SUCCESS;

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
                                        &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

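/*
 * A guest page may be bigger than a hardware IOMMU page, so one guest
 * TCE entry fans out to "subpages" consecutive hardware entries; e.g.
 * a 64K guest page over a 4K hardware table covers 16 entries.
 */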
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

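/*
 * H_PUT_TCE hypercall: set a single TCE. The guest-visible copy is
 * always updated via kvmppc_tce_put(); if hardware tables are attached
 * to this LIOBN, the real IOMMU entries are updated as well.
 */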
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                goto unlock_exit;

        dir = iommu_tce_direction(tce);

        if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);

                iommu_tce_kill(stit->tbl, entry, 1);

                if (ret != H_SUCCESS) {
                        kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
                        goto unlock_exit;
                }
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

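/*
 * H_PUT_TCE_INDIRECT hypercall: set up to 512 consecutive TCEs from a
 * guest-physical list at "tce_list". Entries are validated first so
 * the update loop below does not fail on the guest-visible copy.
 */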
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The SPAPR spec says that the maximum size of the list is 512 TCEs,
         * so the whole list fits in a 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                /*
                 * This looks unsafe, because we validate, then regrab
                 * the TCE from userspace which could have been changed by
                 * another thread.
                 *
                 * But it actually is safe, because the relevant checks will be
                 * re-executed in the following code.  If userspace tries to
                 * change this dodgily it will result in a messier failure mode
                 * but won't threaten the host.
                 */
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto invalidate_exit;
                }
                tce = be64_to_cpu(tce);

                if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                        ret = H_PARAMETER;
                        goto invalidate_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                /* Clear the entry that actually failed */
                                kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
                                                entry + i);
                                goto invalidate_exit;
                        }
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill(stit->tbl, entry, npages);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

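/*
 * H_STUFF_TCE hypercall: fill "npages" consecutive entries with the
 * same TCE value, typically 0 to clear a range before reuse.
 */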
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Reject the permission bits only, so userspace can still poison TCEs for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        /*
         * Keep the starting entry: "ioba" advances in the put loop below
         * but the invalidation at invalidate_exit must cover the original
         * range.
         */
        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto invalidate_exit;

                        WARN_ON_ONCE(1);
                        /* Clear the entry that actually failed */
                        kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill(stit->tbl, entry, npages);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);