linux/arch/powerpc/kvm/book3s_64_vio.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

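/*
 * Number of host pages needed to store a guest-visible TCE table with
 * @iommu_pages entries, one u64 per entry.
 */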
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

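/*
 * Number of pages to charge against locked_vm for a table: the TCE pages
 * themselves plus the kvmppc_spapr_tce_table descriptor with its array of
 * page pointers.
 */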
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

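/* RCU callback: drop the hardware table reference and free the list entry */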
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

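/*
 * kref release: unlink the entry from the table's list and defer the actual
 * freeing to an RCU grace period.
 */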
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

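/*
 * Called when an IOMMU group is detached from the VM: drops the reference
 * every registered TCE table holds on each hardware table of that group.
 */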
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                        }
                }
        }
}

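/*
 * Associates the hardware tables of an IOMMU group with the guest TCE table
 * referred to by @tablefd, provided the DMA window parameters are compatible,
 * so that TCE hypercalls can update the hardware tables directly.
 */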
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /* Make sure hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                                (tbltmp->it_offset << tbltmp->it_page_shift ==
                                 stt->offset << stt->page_shift) &&
                                (tbltmp->it_size << tbltmp->it_page_shift >=
                                 stt->size << stt->page_shift)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM, we just increased
                 * its KVM reference counter and can return.
                 */
                return 0;
        }

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

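/* RCU callback: free the backing pages and the table descriptor */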
static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
}

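/*
 * Returns the backing page for TCE-table page @sttpage, allocating and
 * caching it on first use.
 */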
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
                unsigned long sttpage)
{
        struct page *page = stt->pages[sttpage];

        if (page)
                return page;

        mutex_lock(&stt->alloc_lock);
        page = stt->pages[sttpage];
        if (!page) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                WARN_ON_ONCE(!page);
                if (page)
                        stt->pages[sttpage] = page;
        }
        mutex_unlock(&stt->alloc_lock);

        return page;
}

static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
        if (!page)
                return VM_FAULT_OOM;

        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

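/*
 * fd release: unlinks the table from the VM, drops the references on the
 * attached hardware tables, returns the locked_vm accounting and frees the
 * table after an RCU grace period.
 */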
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        kvm_put_kvm(stt->kvm);

        account_locked_vm(current->mm,
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap           = kvm_spapr_tce_mmap,
        .release        = kvm_spapr_tce_release,
};

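/*
 * Handler for KVM_CREATE_SPAPR_TCE_64: validates the window parameters,
 * accounts the memory against locked_vm, registers the table with the VM and
 * returns an anonymous file descriptor which userspace mmaps to read TCEs.
 */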
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        unsigned long npages, size = args->size;
        int ret = -ENOMEM;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
                return -EINVAL;

        npages = kvmppc_tce_pages(size);
        ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        mutex_init(&stt->alloc_lock);
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        kvm_get_kvm(kvm);
        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0)
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
        else
                kvm_put_kvm(kvm);

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

        kfree(stt);
 fail_acct:
        account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
        return ret;
}

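/* Converts the guest physical address carried in @tce to a userspace address */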
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        return 0;
}

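/*
 * Checks that a guest-supplied TCE is usable: the GPA must be aligned to the
 * table page size and, for every attached hardware table, backed by memory
 * preregistered with mm_iommu.
 */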
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_TOO_HARD;

        if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
                return H_TOO_HARD;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem)
                        return H_TOO_HARD;

                if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
                        return H_TOO_HARD;
        }

        return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;
        unsigned long sttpage;

        idx -= stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        page = stt->pages[sttpage];

        if (!page) {
                /* We allow any TCE, not just with read|write permissions */
                if (!tce)
                        return;

                page = kvm_spapr_get_tce_page(stt, sttpage);
                if (!page)
                        return;
        }
        tbl = page_to_virt(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

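/* Resets a single hardware TCE to an empty, no-access entry */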
static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg(mm, tbl, entry, &hpa, &dir);
}

static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                return H_SUCCESS;

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

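/*
 * Clears one hardware TCE and, if it was mapped, releases the reference on
 * the preregistered memory region that backed it.
 */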
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

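/*
 * Translates @ua to a host physical address via the preregistered memory
 * list, programs it into one hardware TCE and stores @ua in the userspace
 * view of the table; drops the mapped counter again on failure.
 */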
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

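/*
 * H_PUT_TCE hypercall handler: validates the request, updates every attached
 * hardware table and then the guest-visible copy of the entry.
 */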
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                goto unlock_exit;

        dir = iommu_tce_direction(tce);

        if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);

                if (ret != H_SUCCESS) {
                        kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
                        goto unlock_exit;
                }
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

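/*
 * H_PUT_TCE_INDIRECT hypercall handler: reads up to 512 TCEs from a
 * guest-provided, 4K-aligned list and applies each of them as H_PUT_TCE
 * would.
 */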
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The SPAPR spec says that the maximum size of the list is 512 TCEs,
         * so the whole list fits in a 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                /*
                 * This looks unsafe, because we validate, then regrab
                 * the TCE from userspace which could have been changed by
                 * another thread.
                 *
                 * But it actually is safe, because the relevant checks will be
                 * re-executed in the following code.  If userspace tries to
                 * change this dodgily it will result in a messier failure mode
                 * but won't threaten the host.
                 */
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
                                                entry);
                                goto unlock_exit;
                        }
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

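/*
 * H_STUFF_TCE hypercall handler: writes the same TCE value into @npages
 * consecutive entries, unmapping the corresponding hardware TCEs and then
 * updating the guest-visible copies.
 */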
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only to allow userspace to poison TCE for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE(1);
                        kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);