linux/arch/powerpc/kvm/book3s_64_vio_hv.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

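/*
 * Real-mode-safe flavour of WARN_ON_ONCE(): the generic macro goes through
 * the trap/BUG machinery, which is not safe to hit while running these
 * handlers in real mode, so this variant only reports via pr_err() and
 * dump_stack() and otherwise evaluates to the condition like WARN_ON_ONCE().
 */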
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)      ({                      \
        static bool __section(.data.unlikely) __warned;         \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        if (unlikely(__ret_warn_once && !__warned)) {           \
                __warned = true;                                \
                pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",      \
                                __stringify(condition),         \
                                __func__, __LINE__);            \
                dump_stack();                                   \
        }                                                       \
        unlikely(__ret_warn_once);                              \
})

#else

#define WARN_ON_ONCE_RM(condition) ({                           \
        int __ret_warn_on = !!(condition);                      \
        unlikely(__ret_warn_on);                                \
})

#endif

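/* Number of 64-bit TCE entries in one page of the KVM copy of a guest TCE table */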
#define TCES_PER_PAGE   (PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                unsigned long liobn)
{
        struct kvmppc_spapr_tce_table *stt;

        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;

        return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates a TCE address.
 * At the moment only the permission flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is expected to process them), we can skip
 * checking other things (such as whether the TCE points to guest RAM or
 * whether the page was actually allocated).
 *
 * WARNING: This will be called in real mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_PARAMETER;

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/*
 * Note on the use of page_address() in real mode.
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), a purely arithmetic
 * operation that does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but then
 * either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL would have to be
 * enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
        return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real mode on HV KVM and virtual
 *          mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;

        idx -= stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        tbl = kvmppc_page_address(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

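/*
 * Converts a guest physical address to the corresponding userspace virtual
 * address in the memslot that covers it; the TCE permission bits are masked
 * off and the offset within the page is preserved. Optionally returns a
 * pointer to the rmap entry for the guest page so the caller can lock it
 * against invalidation.
 *
 * Returns 0 on success or -EINVAL if no memslot covers the address.
 */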
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
                unsigned long *ua, unsigned long **prmap)
{
        unsigned long gfn = gpa >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        if (prmap)
                *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
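/*
 * Clears a hardware IOMMU table entry by exchanging it with an empty
 * (DMA_NONE) TCE. Used on error paths to keep the hardware table consistent
 * with the guest view; the return value of the exchange is deliberately
 * ignored here.
 */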
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

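/*
 * Drops the reference on the preregistered memory region backing the current
 * mapping of @entry: the userspace address stored in it_userspace is looked
 * up in the preregistered memory list, the region's "mapped" counter is
 * decremented and the stored address is cleared. Returns H_TOO_HARD to make
 * the caller retry in virtual mode when the real-mode lookup cannot be done.
 */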
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        pua = (void *) vmalloc_to_phys(pua);
        if (WARN_ON_ONCE_RM(!pua))
                return H_HARDWARE;

        mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = 0;

        return H_SUCCESS;
}

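/*
 * Unmaps a single IOMMU table entry in real mode: the entry is exchanged
 * with DMA_NONE and, if it used to hold a valid mapping, the reference on
 * the backing preregistered memory is dropped. If dropping the reference
 * fails, the old mapping is restored and the error is returned.
 */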
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret)
                iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

        return ret;
}

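/*
 * A single guest TCE may cover several IOMMU pages when the guest view uses
 * a larger page size than the hardware table (stt->page_shift >=
 * tbl->it_page_shift), so unmap every hardware entry backing the guest one.
 */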
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

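/*
 * Maps a single IOMMU table entry in real mode: translates the userspace
 * address via the preregistered memory list, takes a "mapped" reference so
 * the region cannot be unregistered while the mapping exists, programs the
 * hardware table and records the userspace address in it_userspace. If the
 * entry previously held a valid mapping, the reference taken for it is
 * dropped. H_TOO_HARD asks the caller to retry in virtual mode.
 */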
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa = 0;
        unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
                        &hpa)))
                return H_HARDWARE;

        pua = (void *) vmalloc_to_phys(pua);
        if (WARN_ON_ONCE_RM(!pua))
                return H_HARDWARE;

        if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
                return H_CLOSED;

        ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
        if (ret) {
                mm_iommu_mapped_dec(mem);
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = ua;

        return 0;
}

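/*
 * Mirror of kvmppc_rm_tce_iommu_unmap(): maps every IOMMU page backing a
 * single guest TCE entry, advancing the userspace address by the hardware
 * IOMMU page size for each subpage.
 */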
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

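/*
 * Real mode handler for the H_PUT_TCE hypercall: validates the LIOBN, the IO
 * address and the TCE itself, maps or unmaps the entry in every hardware
 * IOMMU table attached to this LIOBN, and finally stores the guest value in
 * the KVM copy of the table. On the guest side this is roughly what the
 * pseries IOMMU code issues via plpar_hcall_norets(H_PUT_TCE, liobn, ioba,
 * tce) for each page it maps (a sketch of the caller, not part of this file).
 */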
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);
        if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
                        tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
                return H_PARAMETER;

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry, ua, dir);

                if (ret == H_SUCCESS)
                        continue;

                if (ret == H_TOO_HARD)
                        return ret;

                WARN_ON_ONCE_RM(1);
                kvmppc_rm_clear_tce(stit->tbl, entry);
        }

        kvmppc_tce_put(stt, entry, tce);

        return H_SUCCESS;
}

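/*
 * Translates a userspace address to a host physical address by walking the
 * userspace page table in real mode. Anything that is easier to deal with
 * in virtual mode (huge pages, PTEs that have not been referenced yet)
 * makes it return -EAGAIN so the hypercall is passed up.
 */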
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
                unsigned long ua, unsigned long *phpa)
{
        pte_t *ptep, pte;
        unsigned shift = 0;

        /*
         * Called in real mode with MSR_EE = 0. We are safe here.
         * It is ok to do the lookup with arch.pgdir here, because
         * we are doing this on secondary cpus and current task there
         * is not the hypervisor. Also this is safe against THP in the
         * host, because an IPI to the primary thread will wait for the
         * secondary to exit, which again lets the page table walk below
         * finish.
         */
        ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
        if (!ptep || !pte_present(*ptep))
                return -ENXIO;
        pte = *ptep;

        if (!shift)
                shift = PAGE_SHIFT;

        /* Avoid handling anything potentially complicated in realmode */
        if (shift > PAGE_SHIFT)
                return -EAGAIN;

        if (!pte_young(pte))
                return -EAGAIN;

        *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
                        (ua & ~PAGE_MASK);

        return 0;
}

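/*
 * Real mode handler for the H_PUT_TCE_INDIRECT hypercall: reads a list of up
 * to 512 TCEs from guest memory at tce_list and applies each of them as
 * H_PUT_TCE would. The list itself is accessed either through preregistered
 * memory (the VFIO case) or, failing that, by walking the userspace page
 * table under the rmap lock.
 */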
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS;
        unsigned long tces, entry, ua = 0;
        unsigned long *rmap = NULL;
        bool prereg = false;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The spec says that the maximum size of the list is 512 TCEs,
         * so the whole list fits within a single 4K page
         * (512 * sizeof(u64) == 4096).
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        if (mm_iommu_preregistered(vcpu->kvm->mm)) {
                /*
                 * We get here if guest memory was pre-registered, which is
                 * normally the VFIO case, and the gpa->hpa translation does
                 * not depend on the HPT.
                 */
                struct mm_iommu_table_group_mem_t *mem;

                if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
                        return H_TOO_HARD;

                mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
                if (mem)
                        prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
                                        IOMMU_PAGE_SHIFT_4K, &tces) == 0;
        }

        if (!prereg) {
                /*
                 * This is usually the case of a guest with emulated devices
                 * only, when the TCE list is not in preregistered memory.
                 * We do not require memory to be preregistered in this case,
                 * so lock the rmap and walk the page table via
                 * __find_linux_pte().
                 */
                if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
                        return H_TOO_HARD;

                rmap = (void *) vmalloc_to_phys(rmap);
                if (WARN_ON_ONCE_RM(!rmap))
                        return H_HARDWARE;

                /*
                 * Synchronize with the MMU notifier callbacks in
                 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
                 * While we have the rmap lock, code running on other CPUs
                 * cannot finish unmapping the host real page that backs
                 * this guest real page, so we are OK to access the host
                 * real page.
                 */
                lock_rmap(rmap);
                if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;

                ua = 0;
                if (kvmppc_gpa_to_ua(vcpu->kvm,
                                tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
                                &ua, NULL)) {
                        /* Do not return directly, the rmap may still be locked */
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto unlock_exit;

                        WARN_ON_ONCE_RM(1);
                        /* Clear the entry that actually failed, not the first one */
                        kvmppc_rm_clear_tce(stit->tbl, entry + i);
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        if (rmap)
                unlock_rmap(rmap);

        return ret;
}

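/*
 * Real mode handler for the H_STUFF_TCE hypercall: writes the same TCE value
 * into npages consecutive entries starting at ioba (typically used to clear
 * a range), unmapping the corresponding entries in every attached hardware
 * IOMMU table first.
 */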
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only to allow userspace to poison TCEs for debugging */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE_RM(1);
                        /* Clear the entry that actually failed, not the first one */
                        kvmppc_rm_clear_tce(stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        unsigned long idx;
        struct page *page;
        u64 *tbl;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = (ioba >> stt->page_shift) - stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        tbl = (u64 *)page_address(page);

        vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */