linux/drivers/s390/cio/vfio_ccw_cp.c
// SPDX-License-Identifier: GPL-2.0
/*
 * channel program interfaces
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/ratelimit.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>

#include "vfio_ccw_cp.h"

struct pfn_array {
        /* Starting guest physical I/O address. */
        unsigned long           pa_iova;
        /* Array that stores PFNs of the pages that need to be pinned. */
        unsigned long           *pa_iova_pfn;
        /* Array that receives PFNs of the pages pinned. */
        unsigned long           *pa_pfn;
        /* Number of pages pinned from @pa_iova. */
        int                     pa_nr;
};

struct ccwchain {
        struct list_head        next;
        struct ccw1             *ch_ccw;
        /* Guest physical address of the current chain. */
        u64                     ch_iova;
        /* Count of the valid ccws in chain. */
        int                     ch_len;
        /* Pinned PAGEs for the original data. */
        struct pfn_array        *ch_pa;
};

/*
 * pfn_array_alloc() - alloc memory for PFNs
 * @pa: pfn_array on which to perform the operation
 * @iova: target guest physical address
 * @len: number of bytes that should be pinned from @iova
 *
 * Attempt to allocate memory for PFNs.
 *
 * Usage of pfn_array:
 * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); every field in
 * this structure will be filled in by this function.
 *
 * Returns:
 *         0 if PFNs are allocated
 *   -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
 *   -ENOMEM if alloc failed
 */
static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
{
        int i;

        if (pa->pa_nr || pa->pa_iova_pfn)
                return -EINVAL;

        pa->pa_iova = iova;

        pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (!pa->pa_nr)
                return -EINVAL;

        pa->pa_iova_pfn = kcalloc(pa->pa_nr,
                                  sizeof(*pa->pa_iova_pfn) +
                                  sizeof(*pa->pa_pfn),
                                  GFP_KERNEL);
        if (unlikely(!pa->pa_iova_pfn)) {
                pa->pa_nr = 0;
                return -ENOMEM;
        }
        pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;

        pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
        pa->pa_pfn[0] = -1ULL;
        for (i = 1; i < pa->pa_nr; i++) {
                pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
                pa->pa_pfn[i] = -1ULL;
        }

        return 0;
}
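
/*
 * Illustrative sketch (not part of the driver): how pfn_array_alloc()
 * sizes a request that is not page aligned.  With 4K pages, pinning
 * 16 bytes at guest address 0x10ff8 crosses a page boundary, so two
 * PFN slots are set up:
 *
 *   struct pfn_array pa = { 0 };
 *
 *   pfn_array_alloc(&pa, 0x10ff8, 16);
 *   // pa.pa_nr          == 2
 *   // pa.pa_iova_pfn[0] == 0x10, pa.pa_iova_pfn[1] == 0x11
 *   // pa.pa_pfn[0..1]   == -1ULL until pfn_array_pin() fills them in
 */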

/*
 * pfn_array_pin() - Pin user pages in memory
 * @pa: pfn_array on which to perform the operation
 * @mdev: the mediated device to perform pin operations
 *
 * Returns number of pages pinned upon success.
 * If the pin request partially succeeds, or fails completely,
 * all pages are left unpinned and a negative error value is returned.
 */
static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
{
        int ret = 0;

        ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
                             IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);

        if (ret < 0) {
                goto err_out;
        } else if (ret > 0 && ret != pa->pa_nr) {
                vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
                ret = -EINVAL;
                goto err_out;
        }

        return ret;

err_out:
        pa->pa_nr = 0;

        return ret;
}
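
/*
 * Illustrative sketch (not part of the driver): the expected pairing of
 * the pfn_array helpers.  copy_from_iova() below follows exactly this
 * pattern; "mdev", "iova" and "len" stand for whatever the caller has.
 *
 *   struct pfn_array pa = { 0 };
 *   int ret;
 *
 *   ret = pfn_array_alloc(&pa, iova, len);
 *   if (ret == 0)
 *           ret = pfn_array_pin(&pa, mdev);     // > 0: pages pinned
 *   if (ret > 0) {
 *           // ... use the host PFNs in pa.pa_pfn[] ...
 *   }
 *   pfn_array_unpin_free(&pa, mdev);            // safe even after a failed pin
 */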

/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
        /* Only unpin if any pages were pinned to begin with */
        if (pa->pa_nr)
                vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
        pa->pa_nr = 0;
        kfree(pa->pa_iova_pfn);
}

static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
{
        unsigned long iova_pfn = iova >> PAGE_SHIFT;
        int i;

        for (i = 0; i < pa->pa_nr; i++)
                if (pa->pa_iova_pfn[i] == iova_pfn)
                        return true;

        return false;
}

/* Create the list of IDAL words for a pfn_array. */
static inline void pfn_array_idal_create_words(
        struct pfn_array *pa,
        unsigned long *idaws)
{
        int i;

        /*
         * IDAL words (except the first one) rely on the memory being 4k
         * aligned. If a user virtual address is 4K aligned, then its
         * corresponding kernel physical address will also be 4K aligned. Thus
         * there is no problem here in simply using the phys to create an
         * idaw.
         */

        for (i = 0; i < pa->pa_nr; i++)
                idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;

        /* Adjust the first IDAW, since it may not start on a page boundary */
        idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
}
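
/*
 * Illustrative sketch (not part of the driver): the IDAL produced for
 * the 16-byte example above, assuming the two guest pages were pinned
 * to host PFNs 0x2345 and 0x6789:
 *
 *   unsigned long idaws[2];
 *
 *   pfn_array_idal_create_words(&pa, idaws);
 *   // idaws[0] == 0x2345ff8  (first word keeps the in-page offset)
 *   // idaws[1] == 0x6789000  (later words are 4K aligned)
 */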

static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
{
        struct ccw0 ccw0;
        struct ccw1 *pccw1 = source;
        int i;

        for (i = 0; i < len; i++) {
                ccw0 = *(struct ccw0 *)pccw1;
                if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
                        pccw1->cmd_code = CCW_CMD_TIC;
                        pccw1->flags = 0;
                        pccw1->count = 0;
                } else {
                        pccw1->cmd_code = ccw0.cmd_code;
                        pccw1->flags = ccw0.flags;
                        pccw1->count = ccw0.count;
                }
                pccw1->cda = ccw0.cda;
                pccw1++;
        }
}

/*
 * Within the domain (@mdev), copy @n bytes from a guest physical
 * address (@iova) to a host physical address (@to).
 */
static long copy_from_iova(struct device *mdev,
                           void *to, u64 iova,
                           unsigned long n)
{
        struct pfn_array pa = {0};
        u64 from;
        int i, ret;
        unsigned long l, m;

        ret = pfn_array_alloc(&pa, iova, n);
        if (ret < 0)
                return ret;

        ret = pfn_array_pin(&pa, mdev);
        if (ret < 0) {
                pfn_array_unpin_free(&pa, mdev);
                return ret;
        }

        l = n;
        for (i = 0; i < pa.pa_nr; i++) {
                from = pa.pa_pfn[i] << PAGE_SHIFT;
                m = PAGE_SIZE;
                if (i == 0) {
                        from += iova & (PAGE_SIZE - 1);
                        m -= iova & (PAGE_SIZE - 1);
                }

                m = min(l, m);
                memcpy(to + (n - l), (void *)from, m);

                l -= m;
                if (l == 0)
                        break;
        }

        pfn_array_unpin_free(&pa, mdev);

        return l;
}
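
/*
 * Illustrative sketch (not part of the driver): a 16-byte copy from the
 * unaligned guest address used in the examples above.
 *
 *   char buf[16];
 *
 *   copy_from_iova(mdev, buf, 0x10ff8, 16);
 *   // iteration 0: m = PAGE_SIZE - 0xff8 = 8 bytes from the first page
 *   // iteration 1: the remaining 8 bytes from the start of the second page
 *   // a return value of 0 means every one of the @n bytes was copied
 */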

/*
 * Helpers to operate ccwchain.
 */
#define ccw_is_read(_ccw) (((_ccw)->cmd_code & 0x03) == 0x02)
#define ccw_is_read_backward(_ccw) (((_ccw)->cmd_code & 0x0F) == 0x0C)
#define ccw_is_sense(_ccw) (((_ccw)->cmd_code & 0x0F) == CCW_CMD_BASIC_SENSE)

#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)

#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)

#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
#define ccw_is_skip(_ccw) ((_ccw)->flags & CCW_FLAG_SKIP)

#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))

/*
 * ccw_does_data_transfer()
 *
 * Determine whether a CCW will move any data, such that the guest pages
 * would need to be pinned before performing the I/O.
 *
 * Returns 1 if yes, 0 if no.
 */
static inline int ccw_does_data_transfer(struct ccw1 *ccw)
{
        /* If the count field is zero, then no data will be transferred */
        if (ccw->count == 0)
                return 0;

        /* If the command is a NOP, then no data will be transferred */
        if (ccw_is_noop(ccw))
                return 0;

        /* If the skip flag is off, then data will be transferred */
        if (!ccw_is_skip(ccw))
                return 1;

        /*
         * If the skip flag is on, it is only meaningful if the command
         * code is a read, read backward, sense, or sense ID.  In those
         * cases, no data will be transferred.
         */
        if (ccw_is_read(ccw) || ccw_is_read_backward(ccw))
                return 0;

        if (ccw_is_sense(ccw))
                return 0;

        /* The skip flag is on, but it is ignored for this command code. */
        return 1;
}
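
/*
 * Illustrative sketch (not part of the driver): how the checks above
 * combine for a few representative CCWs:
 *
 *   write command, count 4096, skip off  -> 1  (pages must be pinned)
 *   no-op command, count 4096            -> 0  (a NOP moves no data)
 *   read command,  count 4096, skip on   -> 0  (skip suppresses the transfer)
 *   write command, count 4096, skip on   -> 1  (skip is ignored for a write)
 */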

/*
 * is_cpa_within_range()
 *
 * @cpa: channel program address being checked
 * @head: address of the beginning of a CCW chain
 * @len: number of CCWs within the chain
 *
 * Determine whether the address of a CCW (whether a new chain,
 * or the target of a TIC) falls within a range (including the end points).
 *
 * Returns 1 if yes, 0 if no.
 */
static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
{
        u32 tail = head + (len - 1) * sizeof(struct ccw1);

        return (head <= cpa && cpa <= tail);
}
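
/*
 * Illustrative sketch (not part of the driver): for a chain of four
 * format-1 CCWs (8 bytes each) starting at guest address 0x20000,
 * tail works out to 0x20000 + 3 * 8 = 0x20018, so:
 *
 *   is_cpa_within_range(0x20000, 0x20000, 4)  ->  1  (the head itself)
 *   is_cpa_within_range(0x20018, 0x20000, 4)  ->  1  (the last CCW)
 *   is_cpa_within_range(0x20020, 0x20000, 4)  ->  0  (first CCW past the end)
 */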

static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
{
        if (!ccw_is_tic(ccw))
                return 0;

        return is_cpa_within_range(ccw->cda, head, len);
}

static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
        struct ccwchain *chain;
        void *data;
        size_t size;

        /* Make ccw address aligned to 8. */
        size = ((sizeof(*chain) + 7L) & -8L) +
                sizeof(*chain->ch_ccw) * len +
                sizeof(*chain->ch_pa) * len;
        chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
        if (!chain)
                return NULL;

        data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
        chain->ch_ccw = (struct ccw1 *)data;

        data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
        chain->ch_pa = (struct pfn_array *)data;

        chain->ch_len = len;

        list_add_tail(&chain->next, &cp->ccwchain_list);

        return chain;
}

static void ccwchain_free(struct ccwchain *chain)
{
        list_del(&chain->next);
        kfree(chain);
}

/* Free resources for a ccw that allocated memory for its cda. */
static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
        struct ccw1 *ccw = chain->ch_ccw + idx;

        if (ccw_is_tic(ccw))
                return;

        kfree((void *)(u64)ccw->cda);
}

/**
 * ccwchain_calc_length - calculate the length of the ccw chain.
 * @iova: guest physical address of the target ccw chain
 * @cp: channel_program on which to perform the operation
 *
 * This is the chain length not considering any TICs.
 * You need to do a new round for each TIC target.
 *
 * The program is also validated for the absence of indirect-data-addressing
 * scenarios that are not yet supported.
 *
 * Returns: the length of the ccw chain or -errno.
 */
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
        struct ccw1 *ccw = cp->guest_cp;
        int cnt = 0;

        do {
                cnt++;

                /*
                 * As we don't want to fail direct addressing even if the
                 * orb specified one of the unsupported formats, we defer
                 * checking for IDAWs in unsupported formats to here.
                 */
                if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
                        return -EOPNOTSUPP;

                /*
                 * We want to keep counting if the current CCW has the
                 * command-chaining flag enabled, or if it is a TIC CCW
                 * that loops back into the current chain.  The latter
                 * is used for device orientation, where the CCW PRIOR to
                 * the TIC can either jump to the TIC or a CCW immediately
                 * after the TIC, depending on the results of its operation.
                 */
                if (!ccw_is_chain(ccw) && !is_tic_within_range(ccw, iova, cnt))
                        break;

                ccw++;
        } while (cnt < CCWCHAIN_LEN_MAX + 1);

        if (cnt == CCWCHAIN_LEN_MAX + 1)
                cnt = -EINVAL;

        return cnt;
}
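
/*
 * Illustrative sketch (not part of the driver): a guest chain built for
 * device orientation, as described in the comment above.  The TIC has no
 * chaining flags, but because it points back into the chain the counting
 * loop keeps going:
 *
 *   cpa + 0x00:  SEARCH, CC set
 *   cpa + 0x08:  TIC -> cpa + 0x00     (status modifier may skip this)
 *   cpa + 0x10:  READ, no flags        (counted, then the loop stops)
 *
 *   ccwchain_calc_length(cpa, cp) == 3
 */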

static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
{
        struct ccwchain *chain;
        u32 ccw_head;

        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                ccw_head = chain->ch_iova;
                if (is_cpa_within_range(tic->cda, ccw_head, chain->ch_len))
                        return 1;
        }

        return 0;
}

static int ccwchain_loop_tic(struct ccwchain *chain,
                             struct channel_program *cp);

static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
{
        struct ccwchain *chain;
        int len, ret;

        /* Copy 2K (the most we support today) of possible CCWs */
        len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
                             CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
        if (len)
                return len;

        /* Convert any Format-0 CCWs to Format-1 */
        if (!cp->orb.cmd.fmt)
                convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);

        /* Count the CCWs in the current chain */
        len = ccwchain_calc_length(cda, cp);
        if (len < 0)
                return len;

        /* Allocate a new chain for this segment. */
        chain = ccwchain_alloc(cp, len);
        if (!chain)
                return -ENOMEM;
        chain->ch_iova = cda;

        /* Copy the actual CCWs into the new chain */
        memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));

        /* Loop for tics on this new chain. */
        ret = ccwchain_loop_tic(chain, cp);

        if (ret)
                ccwchain_free(chain);

        return ret;
}

/* Loop for TICs. */
static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
{
        struct ccw1 *tic;
        int i, ret;

        for (i = 0; i < chain->ch_len; i++) {
                tic = chain->ch_ccw + i;

                if (!ccw_is_tic(tic))
                        continue;

                /* May transfer to an existing chain. */
                if (tic_target_chain_exists(tic, cp))
                        continue;

                /* Build a ccwchain for the next segment */
                ret = ccwchain_handle_ccw(tic->cda, cp);
                if (ret)
                        return ret;
        }

        return 0;
}

static int ccwchain_fetch_tic(struct ccwchain *chain,
                              int idx,
                              struct channel_program *cp)
{
        struct ccw1 *ccw = chain->ch_ccw + idx;
        struct ccwchain *iter;
        u32 ccw_head;

        list_for_each_entry(iter, &cp->ccwchain_list, next) {
                ccw_head = iter->ch_iova;
                if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
                        ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
                                                     (ccw->cda - ccw_head));
                        return 0;
                }
        }

        return -EFAULT;
}

static int ccwchain_fetch_direct(struct ccwchain *chain,
                                 int idx,
                                 struct channel_program *cp)
{
        struct ccw1 *ccw;
        struct pfn_array *pa;
        u64 iova;
        unsigned long *idaws;
        int ret;
        int bytes = 1;
        int idaw_nr, idal_len;
        int i;

        ccw = chain->ch_ccw + idx;

        if (ccw->count)
                bytes = ccw->count;

        /* Calculate size of IDAL */
        if (ccw_is_idal(ccw)) {
                /* Read first IDAW to see if it's 4K-aligned or not. */
                /* All subsequent IDAWs will be 4K-aligned. */
                ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
                if (ret)
                        return ret;
        } else {
                iova = ccw->cda;
        }
        idaw_nr = idal_nr_words((void *)iova, bytes);
        idal_len = idaw_nr * sizeof(*idaws);

        /* Allocate an IDAL from host storage */
        idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
        if (!idaws) {
                ret = -ENOMEM;
                goto out_init;
        }

        /*
         * Allocate an array of PFNs for pages to pin/translate.
         * The number of pages is actually the count of the idaws
         * required for the data transfer, since we only support
         * 4K IDAWs today.
         */
        pa = chain->ch_pa + idx;
        ret = pfn_array_alloc(pa, iova, bytes);
        if (ret < 0)
                goto out_free_idaws;

        if (ccw_is_idal(ccw)) {
                /* Copy guest IDAL into host IDAL */
                ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
                if (ret)
                        goto out_unpin;

                /*
                 * Copy guest IDAWs into pfn_array, in case the memory they
                 * occupy is not contiguous.
                 */
                for (i = 0; i < idaw_nr; i++)
                        pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
        } else {
                /*
                 * No action is required here; the iova addresses in pfn_array
                 * were initialized sequentially in pfn_array_alloc() beginning
                 * with the contents of ccw->cda.
                 */
        }

        if (ccw_does_data_transfer(ccw)) {
                ret = pfn_array_pin(pa, cp->mdev);
                if (ret < 0)
                        goto out_unpin;
        } else {
                pa->pa_nr = 0;
        }

        ccw->cda = (__u32) virt_to_phys(idaws);
        ccw->flags |= CCW_FLAG_IDA;

        /* Populate the IDAL with pinned/translated addresses from pfn */
        pfn_array_idal_create_words(pa, idaws);

        return 0;

out_unpin:
        pfn_array_unpin_free(pa, cp->mdev);
out_free_idaws:
        kfree(idaws);
out_init:
        ccw->cda = 0;
        return ret;
}

/*
 * Fetch one ccw.
 * To reduce memory copy, we'll pin the cda page in memory,
 * and to get rid of the cda 2G limitation of ccw1, we'll translate
 * direct ccws to idal ccws.
 */
static int ccwchain_fetch_one(struct ccwchain *chain,
                              int idx,
                              struct channel_program *cp)
{
        struct ccw1 *ccw = chain->ch_ccw + idx;

        if (ccw_is_tic(ccw))
                return ccwchain_fetch_tic(chain, idx, cp);

        return ccwchain_fetch_direct(chain, idx, cp);
}

/**
 * cp_init() - allocate ccwchains for a channel program.
 * @cp: channel_program on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 * @orb: control block for the channel program from the guest
 *
 * This creates one or more ccwchain(s), and copies the raw data of
 * the target channel program from @orb->cmd.cpa to the new ccwchain(s).
 *
 * Limitations:
 * 1. Supports idal(c64) ccw chaining.
 * 2. Supports 4k idaw.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
        /* custom ratelimit used to avoid flood during guest IPL */
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
        int ret;

        /* this is an error in the caller */
        if (cp->initialized)
                return -EBUSY;

        /*
         * We only support prefetching the channel program. We assume all channel
         * programs executed by supported guests likewise support prefetching.
         * Executing a channel program that does not specify prefetching will
         * typically not cause an error, but a warning is issued to help identify
         * the problem if something does break.
         */
        if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
                dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB");

        INIT_LIST_HEAD(&cp->ccwchain_list);
        memcpy(&cp->orb, orb, sizeof(*orb));
        cp->mdev = mdev;

        /* Build a ccwchain for the first CCW segment */
        ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);

        if (!ret) {
                cp->initialized = true;

                /* It is safe to force: if it was not set but idals used
                 * ccwchain_calc_length would have returned an error.
                 */
                cp->orb.cmd.c64 = 1;
        }

        return ret;
}
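
/*
 * Illustrative sketch (not part of the driver): how a caller is expected
 * to invoke cp_init().  The names private->cp, io_region and mdev_dev()
 * are assumptions borrowed from the vfio-ccw calling code, not defined
 * here:
 *
 *   union orb *orb = (union orb *)io_region->orb_area;
 *   int ret;
 *
 *   ret = cp_init(&private->cp, mdev_dev(mdev), orb);
 *   // on success the guest chains have been copied into
 *   // private->cp.ccwchain_list and private->cp.initialized is true;
 *   // cp_prefetch() can now translate them
 */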

/**
 * cp_free() - free resources for channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This unpins the memory pages and frees the memory space occupied by
 * @cp, which must have been initialized by a previous call to cp_init().
 * Otherwise, undefined behavior occurs.
 */
void cp_free(struct channel_program *cp)
{
        struct ccwchain *chain, *temp;
        int i;

        if (!cp->initialized)
                return;

        cp->initialized = false;
        list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
                for (i = 0; i < chain->ch_len; i++) {
                        pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
                        ccwchain_cda_free(chain, i);
                }
                ccwchain_free(chain);
        }
}

/**
 * cp_prefetch() - translate a guest physical address channel program to
 *                 a real-device runnable channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This function translates the guest-physical-address channel program
 * and stores the result to ccwchain list. @cp must have been
 * initialized by a previous call to cp_init(). Otherwise, undefined
 * behavior occurs.
 * For each chain composing the channel program:
 * - On entry ch_len holds the count of CCWs to be translated.
 * - On exit ch_len is adjusted to the count of successfully translated CCWs.
 * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
 *
 * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced
 * as helpers to do ccw chain translation inside the kernel. Basically
 * they accept a channel program issued by a virtual machine, and
 * translate the channel program to a real-device runnable channel
 * program.
 *
 * These APIs will copy the ccws into kernel-space buffers, and update
 * the guest physical addresses with their corresponding host physical
 * addresses.  Then channel I/O device drivers could issue the
 * translated channel program to real devices to perform an I/O
 * operation.
 *
 * These interfaces are designed to support translation only for
 * channel programs, which are generated and formatted by a
 * guest. Thus this will make it possible for things like VFIO to
 * leverage the interfaces to pass through a channel I/O mediated
 * device in QEMU.
 *
 * We support direct ccw chaining by translating them to idal ccws.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_prefetch(struct channel_program *cp)
{
        struct ccwchain *chain;
        int len, idx, ret;

        /* this is an error in the caller */
        if (!cp->initialized)
                return -EINVAL;

        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                len = chain->ch_len;
                for (idx = 0; idx < len; idx++) {
                        ret = ccwchain_fetch_one(chain, idx, cp);
                        if (ret)
                                goto out_err;
                }
        }

        return 0;
out_err:
        /* Only clean up the chain elements that were actually translated. */
        chain->ch_len = idx;
        list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
                chain->ch_len = 0;
        }
        return ret;
}
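
/*
 * Illustrative sketch (not part of the driver): the intended life cycle
 * of the cp_ helpers described above, roughly as the vfio-ccw state
 * machine drives them.  cp, mdev, orb, intparm, lpm and irb stand for the
 * caller's state; locking and error reporting are omitted:
 *
 *   ret = cp_init(cp, mdev, orb);           // copy the guest chains
 *   if (!ret)
 *           ret = cp_prefetch(cp);          // pin pages, build IDALs
 *   if (!ret) {
 *           union orb *real_orb = cp_get_orb(cp, intparm, lpm);
 *           // issue ssch() with *real_orb, wait for the interrupt,
 *           // then hand the guest back a guest address:
 *           cp_update_scsw(cp, &irb->scsw);
 *   }
 *   cp_free(cp);                            // unpin and release everything
 */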

/**
 * cp_get_orb() - get the orb of the channel program
 * @cp: channel_program on which to perform the operation
 * @intparm: new intparm for the returned orb
 * @lpm: candidate value of the logical-path mask for the returned orb
 *
 * This function returns the address of the updated orb of the channel
 * program. Channel I/O device drivers could use this orb to issue a
 * ssch.
 */
union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
{
        union orb *orb;
        struct ccwchain *chain;
        struct ccw1 *cpa;

        /* this is an error in the caller */
        if (!cp->initialized)
                return NULL;

        orb = &cp->orb;

        orb->cmd.intparm = intparm;
        orb->cmd.fmt = 1;
        orb->cmd.key = PAGE_DEFAULT_KEY >> 4;

        if (orb->cmd.lpm == 0)
                orb->cmd.lpm = lpm;

        chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
        cpa = chain->ch_ccw;
        orb->cmd.cpa = (__u32) __pa(cpa);

        return orb;
}

/**
 * cp_update_scsw() - update scsw for a channel program.
 * @cp: channel_program on which to perform the operation
 * @scsw: I/O results of the channel program and also the target to be
 *        updated
 *
 * @scsw contains the I/O results of the channel program that is pointed
 * to by @cp. However, what @scsw->cpa stores is a host physical
 * address, which is meaningless for the guest, which is waiting for
 * the I/O results.
 *
 * This function updates @scsw->cpa to its corresponding guest physical
 * address.
 */
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
        struct ccwchain *chain;
        u32 cpa = scsw->cmd.cpa;
        u32 ccw_head;

        if (!cp->initialized)
                return;

        /*
         * LATER:
         * For now, only update the cmd.cpa part. We may need to deal with
         * other portions of the schib as well, even if we don't return them
         * in the ioctl directly. Path status changes etc.
         */
        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                ccw_head = (u32)(u64)chain->ch_ccw;
                /*
                 * On successful execution, cpa points just beyond the end
                 * of the chain.
                 */
                if (is_cpa_within_range(cpa, ccw_head, chain->ch_len + 1)) {
                        /*
                         * (cpa - ccw_head) is the offset value of the host
                         * physical ccw to its chain head.
                         * Adding this value to the guest physical ccw chain
                         * head gets us the guest cpa.
                         */
                        cpa = chain->ch_iova + (cpa - ccw_head);
                        break;
                }
        }

        scsw->cmd.cpa = cpa;
}
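
/*
 * Illustrative sketch (not part of the driver): if a four-CCW chain was
 * copied from guest address 0x20000 (chain->ch_iova) to host address
 * 0x3000 (chain->ch_ccw), and the channel stopped after the second CCW,
 * the hardware reports cpa == 0x3010 and the loop above hands the guest
 *
 *   cpa = 0x20000 + (0x3010 - 0x3000) == 0x20010
 *
 * which is the guest address of the third CCW.
 */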

/**
 * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
 * @cp: channel_program on which to perform the operation
 * @iova: the iova to check
 *
 * If the @iova is currently pinned for the ccw chain, return true;
 * else return false.
 */
bool cp_iova_pinned(struct channel_program *cp, u64 iova)
{
        struct ccwchain *chain;
        int i;

        if (!cp->initialized)
                return false;

        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                for (i = 0; i < chain->ch_len; i++)
                        if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
                                return true;
        }

        return false;
}