linux/drivers/infiniband/hw/ehca/ehca_mrmw.c
   1/*
   2 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
   3 *
   4 *  MR/MW functions
   5 *
   6 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
   7 *           Christoph Raisch <raisch@de.ibm.com>
   8 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
   9 *
  10 *  Copyright (c) 2005 IBM Corporation
  11 *
  12 *  All rights reserved.
  13 *
  14 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
  15 *  BSD.
  16 *
  17 * OpenIB BSD License
  18 *
  19 * Redistribution and use in source and binary forms, with or without
  20 * modification, are permitted provided that the following conditions are met:
  21 *
  22 * Redistributions of source code must retain the above copyright notice, this
  23 * list of conditions and the following disclaimer.
  24 *
  25 * Redistributions in binary form must reproduce the above copyright notice,
  26 * this list of conditions and the following disclaimer in the documentation
  27 * and/or other materials
  28 * provided with the distribution.
  29 *
  30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
  38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  40 * POSSIBILITY OF SUCH DAMAGE.
  41 */
  42
  43#include <linux/slab.h>
  44#include <rdma/ib_umem.h>
  45
  46#include "ehca_iverbs.h"
  47#include "ehca_mrmw.h"
  48#include "hcp_if.h"
  49#include "hipz_hw.h"
  50
  51#define NUM_CHUNKS(length, chunk_size) \
  52        (((length) + (chunk_size - 1)) / (chunk_size))
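/* NUM_CHUNKS() is a ceiling division, e.g. NUM_CHUNKS(0x2100, 0x1000) = 3. */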
  53
  54/* max number of rpages (per hcall register_rpages) */
  55#define MAX_RPAGES 512
  56
  57/* DMEM toleration management */
  58#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
  59#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
  60#define EHCA_HUGEPAGESHIFT     34
  61#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
  62#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
  63#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
  64#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
  65#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
  66#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
  67#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
  68#define EHCA_DIR_MAP_SIZE (0x10000)
  69#define EHCA_ENT_MAP_SIZE (0x10000)
  70#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
  71
  72static unsigned long ehca_mr_len;
  73
  74/*
  75 * Memory map data structures
  76 */
  77struct ehca_dir_bmap {
  78        u64 ent[EHCA_MAP_ENTRIES];
  79};
  80struct ehca_top_bmap {
  81        struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
  82};
  83struct ehca_bmap {
  84        struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
  85};
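/*
 * The bus map is a three-level table: ehca_bmap->top[]->dir[]->ent[], each
 * level indexed by EHCA_DIR_INDEX_SHIFT (13) bits, i.e. EHCA_MAP_ENTRIES (8k)
 * slots of 8 bytes per level (matching the 64k EHCA_*_MAP_SIZE values above).
 * Each leaf slot stands for one memory section of EHCA_SECTSIZE bytes;
 * EHCA_INVAL_ADDR marks a slot that is not mapped.  ehca_calc_sectbase()
 * below turns a (top, dir, idx) triple back into the section base address.
 */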
  86
  87static struct ehca_bmap *ehca_bmap;
  88
  89static struct kmem_cache *mr_cache;
  90static struct kmem_cache *mw_cache;
  91
  92enum ehca_mr_pgsize {
  93        EHCA_MR_PGSIZE4K  = 0x1000L,
  94        EHCA_MR_PGSIZE64K = 0x10000L,
  95        EHCA_MR_PGSIZE1M  = 0x100000L,
  96        EHCA_MR_PGSIZE16M = 0x1000000L
  97};
  98
  99#define EHCA_MR_PGSHIFT4K  12
 100#define EHCA_MR_PGSHIFT64K 16
 101#define EHCA_MR_PGSHIFT1M  20
 102#define EHCA_MR_PGSHIFT16M 24
 103
 104static u64 ehca_map_vaddr(void *caddr);
 105
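/*
 * Map an MR page size to the encoding used by hipz_h_register_rpage_mr():
 * 4K (log2 = 12) -> 0, 64K -> 1, 1M -> 2, 16M -> 3.
 */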
 106static u32 ehca_encode_hwpage_size(u32 pgsize)
 107{
 108        int log = ilog2(pgsize);
 109        WARN_ON(log < 12 || log > 24 || log & 3);
 110        return (log - 12) / 4;
 111}
 112
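/*
 * shca->hca_cap_mr_pgsize is a bit mask with one bit per supported MR page
 * size (cf. the "hwpage_size & shca->hca_cap_mr_pgsize" checks below), so
 * the largest supported size is simply its highest set bit.
 */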
 113static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
 114{
 115        return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
 116}
 117
 118static struct ehca_mr *ehca_mr_new(void)
 119{
 120        struct ehca_mr *me;
 121
 122        me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
 123        if (me)
 124                spin_lock_init(&me->mrlock);
 125        else
 126                ehca_gen_err("alloc failed");
 127
 128        return me;
 129}
 130
 131static void ehca_mr_delete(struct ehca_mr *me)
 132{
 133        kmem_cache_free(mr_cache, me);
 134}
 135
 136static struct ehca_mw *ehca_mw_new(void)
 137{
 138        struct ehca_mw *me;
 139
 140        me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
 141        if (me)
 142                spin_lock_init(&me->mwlock);
 143        else
 144                ehca_gen_err("alloc failed");
 145
 146        return me;
 147}
 148
 149static void ehca_mw_delete(struct ehca_mw *me)
 150{
 151        kmem_cache_free(mw_cache, me);
 152}
 153
 154/*----------------------------------------------------------------------*/
 155
 156struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
 157{
 158        struct ib_mr *ib_mr;
 159        int ret;
 160        struct ehca_mr *e_maxmr;
 161        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
 162        struct ehca_shca *shca =
 163                container_of(pd->device, struct ehca_shca, ib_device);
 164
 165        if (shca->maxmr) {
 166                e_maxmr = ehca_mr_new();
 167                if (!e_maxmr) {
 168                        ehca_err(&shca->ib_device, "out of memory");
 169                        ib_mr = ERR_PTR(-ENOMEM);
 170                        goto get_dma_mr_exit0;
 171                }
 172
 173                ret = ehca_reg_maxmr(shca, e_maxmr,
 174                                     (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
 175                                     mr_access_flags, e_pd,
 176                                     &e_maxmr->ib.ib_mr.lkey,
 177                                     &e_maxmr->ib.ib_mr.rkey);
 178                if (ret) {
 179                        ehca_mr_delete(e_maxmr);
 180                        ib_mr = ERR_PTR(ret);
 181                        goto get_dma_mr_exit0;
 182                }
 183                ib_mr = &e_maxmr->ib.ib_mr;
 184        } else {
 185                ehca_err(&shca->ib_device, "no internal max-MR exists!");
 186                ib_mr = ERR_PTR(-EINVAL);
 187                goto get_dma_mr_exit0;
 188        }
 189
 190get_dma_mr_exit0:
 191        if (IS_ERR(ib_mr))
 192                ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
 193                         PTR_ERR(ib_mr), pd, mr_access_flags);
 194        return ib_mr;
 195} /* end ehca_get_dma_mr() */
 196
 197/*----------------------------------------------------------------------*/
 198
 199struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 200                               u64 virt, int mr_access_flags,
 201                               struct ib_udata *udata)
 202{
 203        struct ib_mr *ib_mr;
 204        struct ehca_mr *e_mr;
 205        struct ehca_shca *shca =
 206                container_of(pd->device, struct ehca_shca, ib_device);
 207        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
 208        struct ehca_mr_pginfo pginfo;
 209        int ret, page_shift;
 210        u32 num_kpages;
 211        u32 num_hwpages;
 212        u64 hwpage_size;
 213
 214        if (!pd) {
 215                ehca_gen_err("bad pd=%p", pd);
 216                return ERR_PTR(-EFAULT);
 217        }
 218
 219        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
 220             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
 221            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
 222             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
 223                /*
 224                 * Remote Write Access requires Local Write Access
 225                 * Remote Atomic Access requires Local Write Access
 226                 */
 227                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
 228                         mr_access_flags);
 229                ib_mr = ERR_PTR(-EINVAL);
 230                goto reg_user_mr_exit0;
 231        }
 232
 233        if (length == 0 || virt + length < virt) {
 234                ehca_err(pd->device, "bad input values: length=%llx "
 235                         "virt_base=%llx", length, virt);
 236                ib_mr = ERR_PTR(-EINVAL);
 237                goto reg_user_mr_exit0;
 238        }
 239
 240        e_mr = ehca_mr_new();
 241        if (!e_mr) {
 242                ehca_err(pd->device, "out of memory");
 243                ib_mr = ERR_PTR(-ENOMEM);
 244                goto reg_user_mr_exit0;
 245        }
 246
 247        e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
 248                                 mr_access_flags, 0);
 249        if (IS_ERR(e_mr->umem)) {
 250                ib_mr = (void *)e_mr->umem;
 251                goto reg_user_mr_exit1;
 252        }
 253
 254        if (e_mr->umem->page_shift != PAGE_SHIFT) {
 255                ehca_err(pd->device, "page size not supported, "
 256                         "e_mr->umem->page_shift=%x", e_mr->umem->page_shift);
 257                ib_mr = ERR_PTR(-EINVAL);
 258                goto reg_user_mr_exit2;
 259        }
 260
 261        /* determine number of MR pages */
 262        num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
 263        /* select proper hw_pgsize */
 264        page_shift = PAGE_SHIFT;
 265        if (e_mr->umem->hugetlb) {
 266                /* determine page_shift, clamp between 4K and 16M */
 267                page_shift = (fls64(length - 1) + 3) & ~3;
 268                page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
 269                                 EHCA_MR_PGSHIFT16M);
 270        }
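        /*
         * (fls64(length - 1) + 3) & ~3 rounds the number of address bits up
         * to a multiple of 4, i.e. it picks the smallest of the 4K/64K/1M/16M
         * shifts whose page covers the whole region; e.g. a 5 MB hugetlb
         * region gives fls64() = 23 and thus page_shift 24 (16M pages).
         */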
 271        hwpage_size = 1UL << page_shift;
 272
 273        /* now that we have the desired page size, shift until it's
 274         * supported, too. 4K is always supported, so this terminates.
 275         */
 276        while (!(hwpage_size & shca->hca_cap_mr_pgsize))
 277                hwpage_size >>= 4;
 278
 279reg_user_mr_fallback:
 280        num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
 281        /* register MR on HCA */
 282        memset(&pginfo, 0, sizeof(pginfo));
 283        pginfo.type = EHCA_MR_PGI_USER;
 284        pginfo.hwpage_size = hwpage_size;
 285        pginfo.num_kpages = num_kpages;
 286        pginfo.num_hwpages = num_hwpages;
 287        pginfo.u.usr.region = e_mr->umem;
 288        pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
 289        pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
 290        ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
 291                          e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
 292                          &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
 293        if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
 294                ehca_warn(pd->device, "failed to register mr "
 295                          "with hwpage_size=%llx", hwpage_size);
 296                ehca_info(pd->device, "try to register mr with "
 297                          "kpage_size=%lx", PAGE_SIZE);
 298                /*
 299                 * this means kpages are not contiguous for a hw page
 300                 * try kernel page size as fallback solution
 301                 */
 302                hwpage_size = PAGE_SIZE;
 303                goto reg_user_mr_fallback;
 304        }
 305        if (ret) {
 306                ib_mr = ERR_PTR(ret);
 307                goto reg_user_mr_exit2;
 308        }
 309
 310        /* successful registration of all pages */
 311        return &e_mr->ib.ib_mr;
 312
 313reg_user_mr_exit2:
 314        ib_umem_release(e_mr->umem);
 315reg_user_mr_exit1:
 316        ehca_mr_delete(e_mr);
 317reg_user_mr_exit0:
 318        if (IS_ERR(ib_mr))
 319                ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
 320                         PTR_ERR(ib_mr), pd, mr_access_flags, udata);
 321        return ib_mr;
 322} /* end ehca_reg_user_mr() */
 323
 324/*----------------------------------------------------------------------*/
 325
 326int ehca_dereg_mr(struct ib_mr *mr)
 327{
 328        int ret = 0;
 329        u64 h_ret;
 330        struct ehca_shca *shca =
 331                container_of(mr->device, struct ehca_shca, ib_device);
 332        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
 333
 334        if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
 335                ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
 336                         "e_mr->flags=%x", mr, e_mr, e_mr->flags);
 337                ret = -EINVAL;
 338                goto dereg_mr_exit0;
 339        } else if (e_mr == shca->maxmr) {
 340                /* should be impossible, however reject to be sure */
 341                ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
 342                         "shca->maxmr=%p mr->lkey=%x",
 343                         mr, shca->maxmr, mr->lkey);
 344                ret = -EINVAL;
 345                goto dereg_mr_exit0;
 346        }
 347
 348        /* TODO: BUSY: MR still has bound window(s) */
 349        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 350        if (h_ret != H_SUCCESS) {
 351                ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
 352                         "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
 353                         h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
 354                         e_mr->ipz_mr_handle.handle, mr->lkey);
 355                ret = ehca2ib_return_code(h_ret);
 356                goto dereg_mr_exit0;
 357        }
 358
 359        if (e_mr->umem)
 360                ib_umem_release(e_mr->umem);
 361
 362        /* successful deregistration */
 363        ehca_mr_delete(e_mr);
 364
 365dereg_mr_exit0:
 366        if (ret)
 367                ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
 368        return ret;
 369} /* end ehca_dereg_mr() */
 370
 371/*----------------------------------------------------------------------*/
 372
 373struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 374                            struct ib_udata *udata)
 375{
 376        struct ib_mw *ib_mw;
 377        u64 h_ret;
 378        struct ehca_mw *e_mw;
 379        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
 380        struct ehca_shca *shca =
 381                container_of(pd->device, struct ehca_shca, ib_device);
 382        struct ehca_mw_hipzout_parms hipzout;
 383
 384        if (type != IB_MW_TYPE_1)
 385                return ERR_PTR(-EINVAL);
 386
 387        e_mw = ehca_mw_new();
 388        if (!e_mw) {
 389                ib_mw = ERR_PTR(-ENOMEM);
 390                goto alloc_mw_exit0;
 391        }
 392
 393        h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
 394                                         e_pd->fw_pd, &hipzout);
 395        if (h_ret != H_SUCCESS) {
 396                ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
 397                         "shca=%p hca_hndl=%llx mw=%p",
 398                         h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
 399                ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
 400                goto alloc_mw_exit1;
 401        }
 402        /* successful MW allocation */
 403        e_mw->ipz_mw_handle = hipzout.handle;
 404        e_mw->ib_mw.rkey    = hipzout.rkey;
 405        return &e_mw->ib_mw;
 406
 407alloc_mw_exit1:
 408        ehca_mw_delete(e_mw);
 409alloc_mw_exit0:
 410        if (IS_ERR(ib_mw))
 411                ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
 412        return ib_mw;
 413} /* end ehca_alloc_mw() */
 414
 415/*----------------------------------------------------------------------*/
 416
 417int ehca_dealloc_mw(struct ib_mw *mw)
 418{
 419        u64 h_ret;
 420        struct ehca_shca *shca =
 421                container_of(mw->device, struct ehca_shca, ib_device);
 422        struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
 423
 424        h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
 425        if (h_ret != H_SUCCESS) {
 426                ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
 427                         "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
 428                         h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
 429                         e_mw->ipz_mw_handle.handle);
 430                return ehca2ib_return_code(h_ret);
 431        }
 432        /* successful deallocation */
 433        ehca_mw_delete(e_mw);
 434        return 0;
 435} /* end ehca_dealloc_mw() */
 436
 437/*----------------------------------------------------------------------*/
 438
 439struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 440                              int mr_access_flags,
 441                              struct ib_fmr_attr *fmr_attr)
 442{
 443        struct ib_fmr *ib_fmr;
 444        struct ehca_shca *shca =
 445                container_of(pd->device, struct ehca_shca, ib_device);
 446        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
 447        struct ehca_mr *e_fmr;
 448        int ret;
 449        u32 tmp_lkey, tmp_rkey;
 450        struct ehca_mr_pginfo pginfo;
 451        u64 hw_pgsize;
 452
 453        /* check other parameters */
 454        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
 455             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
 456            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
 457             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
 458                /*
 459                 * Remote Write Access requires Local Write Access
 460                 * Remote Atomic Access requires Local Write Access
 461                 */
 462                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
 463                         mr_access_flags);
 464                ib_fmr = ERR_PTR(-EINVAL);
 465                goto alloc_fmr_exit0;
 466        }
 467        if (mr_access_flags & IB_ACCESS_MW_BIND) {
 468                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
 469                         mr_access_flags);
 470                ib_fmr = ERR_PTR(-EINVAL);
 471                goto alloc_fmr_exit0;
 472        }
 473        if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
 474                ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
 475                         "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
 476                         fmr_attr->max_pages, fmr_attr->max_maps,
 477                         fmr_attr->page_shift);
 478                ib_fmr = ERR_PTR(-EINVAL);
 479                goto alloc_fmr_exit0;
 480        }
 481
 482        hw_pgsize = 1 << fmr_attr->page_shift;
 483        if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
 484                ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
 485                         fmr_attr->page_shift);
 486                ib_fmr = ERR_PTR(-EINVAL);
 487                goto alloc_fmr_exit0;
 488        }
 489
 490        e_fmr = ehca_mr_new();
 491        if (!e_fmr) {
 492                ib_fmr = ERR_PTR(-ENOMEM);
 493                goto alloc_fmr_exit0;
 494        }
 495        e_fmr->flags |= EHCA_MR_FLAG_FMR;
 496
 497        /* register MR on HCA */
 498        memset(&pginfo, 0, sizeof(pginfo));
 499        pginfo.hwpage_size = hw_pgsize;
 500        /*
 501         * pginfo.num_hwpages==0, ie register_rpages() will not be called
 502         * but deferred to map_phys_fmr()
 503         */
 504        ret = ehca_reg_mr(shca, e_fmr, NULL,
 505                          fmr_attr->max_pages * (1 << fmr_attr->page_shift),
 506                          mr_access_flags, e_pd, &pginfo,
 507                          &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
 508        if (ret) {
 509                ib_fmr = ERR_PTR(ret);
 510                goto alloc_fmr_exit1;
 511        }
 512
 513        /* successful */
 514        e_fmr->hwpage_size = hw_pgsize;
 515        e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
 516        e_fmr->fmr_max_pages = fmr_attr->max_pages;
 517        e_fmr->fmr_max_maps = fmr_attr->max_maps;
 518        e_fmr->fmr_map_cnt = 0;
 519        return &e_fmr->ib.ib_fmr;
 520
 521alloc_fmr_exit1:
 522        ehca_mr_delete(e_fmr);
 523alloc_fmr_exit0:
 524        return ib_fmr;
 525} /* end ehca_alloc_fmr() */
 526
 527/*----------------------------------------------------------------------*/
 528
 529int ehca_map_phys_fmr(struct ib_fmr *fmr,
 530                      u64 *page_list,
 531                      int list_len,
 532                      u64 iova)
 533{
 534        int ret;
 535        struct ehca_shca *shca =
 536                container_of(fmr->device, struct ehca_shca, ib_device);
 537        struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
 538        struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
 539        struct ehca_mr_pginfo pginfo;
 540        u32 tmp_lkey, tmp_rkey;
 541
 542        if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
 543                ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
 544                         e_fmr, e_fmr->flags);
 545                ret = -EINVAL;
 546                goto map_phys_fmr_exit0;
 547        }
 548        ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
 549        if (ret)
 550                goto map_phys_fmr_exit0;
 551        if (iova % e_fmr->fmr_page_size) {
 552                /* iova must be aligned to the FMR page size */
 553                ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
 554                         iova, e_fmr->fmr_page_size);
 555                ret = -EINVAL;
 556                goto map_phys_fmr_exit0;
 557        }
 558        if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
 559                /* HCAD does not limit the maps; trace this anyway */
 560                ehca_info(fmr->device, "map limit exceeded, fmr=%p "
 561                          "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
 562                          fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
 563        }
 564
 565        memset(&pginfo, 0, sizeof(pginfo));
 566        pginfo.type = EHCA_MR_PGI_FMR;
 567        pginfo.num_kpages = list_len;
 568        pginfo.hwpage_size = e_fmr->hwpage_size;
 569        pginfo.num_hwpages =
 570                list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
 571        pginfo.u.fmr.page_list = page_list;
 572        pginfo.next_hwpage =
 573                (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
 574        pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
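        /*
         * num_hwpages counts hw pages, not FMR pages: e.g. list_len = 4 FMR
         * pages of 64K backed by 4K hw pages gives 4 * 0x10000 / 0x1000 = 64
         * hw pages to register.
         */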
 575
 576        ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
 577                            list_len * e_fmr->fmr_page_size,
 578                            e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
 579        if (ret)
 580                goto map_phys_fmr_exit0;
 581
 582        /* successful reregistration */
 583        e_fmr->fmr_map_cnt++;
 584        e_fmr->ib.ib_fmr.lkey = tmp_lkey;
 585        e_fmr->ib.ib_fmr.rkey = tmp_rkey;
 586        return 0;
 587
 588map_phys_fmr_exit0:
 589        if (ret)
 590                ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
 591                         "iova=%llx", ret, fmr, page_list, list_len, iova);
 592        return ret;
 593} /* end ehca_map_phys_fmr() */
 594
 595/*----------------------------------------------------------------------*/
 596
 597int ehca_unmap_fmr(struct list_head *fmr_list)
 598{
 599        int ret = 0;
 600        struct ib_fmr *ib_fmr;
 601        struct ehca_shca *shca = NULL;
 602        struct ehca_shca *prev_shca;
 603        struct ehca_mr *e_fmr;
 604        u32 num_fmr = 0;
 605        u32 unmap_fmr_cnt = 0;
 606
 607        /* check that all FMRs belong to the same SHCA, and check the FMR flag */
 608        list_for_each_entry(ib_fmr, fmr_list, list) {
 609                prev_shca = shca;
 610                shca = container_of(ib_fmr->device, struct ehca_shca,
 611                                    ib_device);
 612                e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
 613                if ((shca != prev_shca) && prev_shca) {
 614                        ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
 615                                 "prev_shca=%p e_fmr=%p",
 616                                 shca, prev_shca, e_fmr);
 617                        ret = -EINVAL;
 618                        goto unmap_fmr_exit0;
 619                }
 620                if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
 621                        ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
 622                                 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
 623                        ret = -EINVAL;
 624                        goto unmap_fmr_exit0;
 625                }
 626                num_fmr++;
 627        }
 628
 629        /* loop over all FMRs to unmap */
 630        list_for_each_entry(ib_fmr, fmr_list, list) {
 631                unmap_fmr_cnt++;
 632                e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
 633                shca = container_of(ib_fmr->device, struct ehca_shca,
 634                                    ib_device);
 635                ret = ehca_unmap_one_fmr(shca, e_fmr);
 636                if (ret) {
 637                        /* unmap failed, stop unmapping of rest of FMRs */
 638                        ehca_err(&shca->ib_device, "unmap of one FMR failed, "
 639                                 "stop rest, e_fmr=%p num_fmr=%x "
 640                                 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
 641                                 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
 642                        goto unmap_fmr_exit0;
 643                }
 644        }
 645
 646unmap_fmr_exit0:
 647        if (ret)
 648                ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
 649                             ret, fmr_list, num_fmr, unmap_fmr_cnt);
 650        return ret;
 651} /* end ehca_unmap_fmr() */
 652
 653/*----------------------------------------------------------------------*/
 654
 655int ehca_dealloc_fmr(struct ib_fmr *fmr)
 656{
 657        int ret;
 658        u64 h_ret;
 659        struct ehca_shca *shca =
 660                container_of(fmr->device, struct ehca_shca, ib_device);
 661        struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
 662
 663        if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
 664                ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
 665                         e_fmr, e_fmr->flags);
 666                ret = -EINVAL;
 667                goto free_fmr_exit0;
 668        }
 669
 670        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
 671        if (h_ret != H_SUCCESS) {
 672                ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
 673                         "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
 674                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
 675                         e_fmr->ipz_mr_handle.handle, fmr->lkey);
 676                ret = ehca2ib_return_code(h_ret);
 677                goto free_fmr_exit0;
 678        }
 679        /* successful deregistration */
 680        ehca_mr_delete(e_fmr);
 681        return 0;
 682
 683free_fmr_exit0:
 684        if (ret)
 685                ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
 686        return ret;
 687} /* end ehca_dealloc_fmr() */
 688
 689/*----------------------------------------------------------------------*/
 690
 691static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
 692                                   struct ehca_mr *e_mr,
 693                                   struct ehca_mr_pginfo *pginfo);
 694
 695int ehca_reg_mr(struct ehca_shca *shca,
 696                struct ehca_mr *e_mr,
 697                u64 *iova_start,
 698                u64 size,
 699                int acl,
 700                struct ehca_pd *e_pd,
 701                struct ehca_mr_pginfo *pginfo,
 702                u32 *lkey, /*OUT*/
 703                u32 *rkey, /*OUT*/
 704                enum ehca_reg_type reg_type)
 705{
 706        int ret;
 707        u64 h_ret;
 708        u32 hipz_acl;
 709        struct ehca_mr_hipzout_parms hipzout;
 710
 711        ehca_mrmw_map_acl(acl, &hipz_acl);
 712        ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
 713        if (ehca_use_hp_mr == 1)
 714                hipz_acl |= 0x00000001;
 715
 716        h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
 717                                         (u64)iova_start, size, hipz_acl,
 718                                         e_pd->fw_pd, &hipzout);
 719        if (h_ret != H_SUCCESS) {
 720                ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
 721                         "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
 722                ret = ehca2ib_return_code(h_ret);
 723                goto ehca_reg_mr_exit0;
 724        }
 725
 726        e_mr->ipz_mr_handle = hipzout.handle;
 727
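        /*
         * EHCA_REG_MR takes the rpages from pginfo (user, FMR or physical
         * pages), while EHCA_REG_BUSMAP_MR walks the global ehca_bmap and is
         * used by ehca_reg_internal_maxmr() for the internal max-MR.
         */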
 728        if (reg_type == EHCA_REG_BUSMAP_MR)
 729                ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
 730        else if (reg_type == EHCA_REG_MR)
 731                ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
 732        else
 733                ret = -EINVAL;
 734
 735        if (ret)
 736                goto ehca_reg_mr_exit1;
 737
 738        /* successful registration */
 739        e_mr->num_kpages = pginfo->num_kpages;
 740        e_mr->num_hwpages = pginfo->num_hwpages;
 741        e_mr->hwpage_size = pginfo->hwpage_size;
 742        e_mr->start = iova_start;
 743        e_mr->size = size;
 744        e_mr->acl = acl;
 745        *lkey = hipzout.lkey;
 746        *rkey = hipzout.rkey;
 747        return 0;
 748
 749ehca_reg_mr_exit1:
 750        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 751        if (h_ret != H_SUCCESS) {
 752                ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
 753                         "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
 754                         "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
 755                         h_ret, shca, e_mr, iova_start, size, acl, e_pd,
 756                         hipzout.lkey, pginfo, pginfo->num_kpages,
 757                         pginfo->num_hwpages, ret);
 758                ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
 759                         "not recoverable");
 760        }
 761ehca_reg_mr_exit0:
 762        if (ret)
 763                ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
 764                         "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
 765                         "num_kpages=%llx num_hwpages=%llx",
 766                         ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
 767                         pginfo->num_kpages, pginfo->num_hwpages);
 768        return ret;
 769} /* end ehca_reg_mr() */
 770
 771/*----------------------------------------------------------------------*/
 772
 773int ehca_reg_mr_rpages(struct ehca_shca *shca,
 774                       struct ehca_mr *e_mr,
 775                       struct ehca_mr_pginfo *pginfo)
 776{
 777        int ret = 0;
 778        u64 h_ret;
 779        u32 rnum;
 780        u64 rpage;
 781        u32 i;
 782        u64 *kpage;
 783
 784        if (!pginfo->num_hwpages) /* in case of fmr */
 785                return 0;
 786
 787        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 788        if (!kpage) {
 789                ehca_err(&shca->ib_device, "kpage alloc failed");
 790                ret = -ENOMEM;
 791                goto ehca_reg_mr_rpages_exit0;
 792        }
 793
 794        /* max MAX_RPAGES ehca mr pages per register call */
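        /*
         * E.g. 1000 hw pages are registered in NUM_CHUNKS(1000, 512) = 2
         * hcalls: 512 pages first, then 1000 % 512 = 488.  Only the final
         * hcall is expected to return H_SUCCESS, the earlier ones
         * H_PAGE_REGISTERED.
         */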
 795        for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
 796
 797                if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
 798                        rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
 799                        if (rnum == 0)
 800                                rnum = MAX_RPAGES;      /* last shot is full */
 801                } else
 802                        rnum = MAX_RPAGES;
 803
 804                ret = ehca_set_pagebuf(pginfo, rnum, kpage);
 805                if (ret) {
 806                        ehca_err(&shca->ib_device, "ehca_set_pagebuf "
 807                                 "bad rc, ret=%i rnum=%x kpage=%p",
 808                                 ret, rnum, kpage);
 809                        goto ehca_reg_mr_rpages_exit1;
 810                }
 811
 812                if (rnum > 1) {
 813                        rpage = __pa(kpage);
 814                        if (!rpage) {
 815                                ehca_err(&shca->ib_device, "kpage=%p i=%x",
 816                                         kpage, i);
 817                                ret = -EFAULT;
 818                                goto ehca_reg_mr_rpages_exit1;
 819                        }
 820                } else
 821                        rpage = *kpage;
 822
 823                h_ret = hipz_h_register_rpage_mr(
 824                        shca->ipz_hca_handle, e_mr,
 825                        ehca_encode_hwpage_size(pginfo->hwpage_size),
 826                        0, rpage, rnum);
 827
 828                if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
 829                        /*
 830                         * check for 'registration complete'==H_SUCCESS
 831                         * and for 'page registered'==H_PAGE_REGISTERED
 832                         */
 833                        if (h_ret != H_SUCCESS) {
 834                                ehca_err(&shca->ib_device, "last "
 835                                         "hipz_reg_rpage_mr failed, h_ret=%lli "
 836                                         "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
 837                                         " lkey=%x", h_ret, e_mr, i,
 838                                         shca->ipz_hca_handle.handle,
 839                                         e_mr->ipz_mr_handle.handle,
 840                                         e_mr->ib.ib_mr.lkey);
 841                                ret = ehca2ib_return_code(h_ret);
 842                                break;
 843                        } else
 844                                ret = 0;
 845                } else if (h_ret != H_PAGE_REGISTERED) {
 846                        ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
 847                                 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
 848                                 "mr_hndl=%llx", h_ret, e_mr, i,
 849                                 e_mr->ib.ib_mr.lkey,
 850                                 shca->ipz_hca_handle.handle,
 851                                 e_mr->ipz_mr_handle.handle);
 852                        ret = ehca2ib_return_code(h_ret);
 853                        break;
 854                } else
 855                        ret = 0;
 856        } /* end for(i) */
 857
 858
 859ehca_reg_mr_rpages_exit1:
 860        ehca_free_fw_ctrlblock(kpage);
 861ehca_reg_mr_rpages_exit0:
 862        if (ret)
 863                ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
 864                         "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
 865                         pginfo, pginfo->num_kpages, pginfo->num_hwpages);
 866        return ret;
 867} /* end ehca_reg_mr_rpages() */
 868
 869/*----------------------------------------------------------------------*/
 870
 871inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
 872                                struct ehca_mr *e_mr,
 873                                u64 *iova_start,
 874                                u64 size,
 875                                u32 acl,
 876                                struct ehca_pd *e_pd,
 877                                struct ehca_mr_pginfo *pginfo,
 878                                u32 *lkey, /*OUT*/
 879                                u32 *rkey) /*OUT*/
 880{
 881        int ret;
 882        u64 h_ret;
 883        u32 hipz_acl;
 884        u64 *kpage;
 885        u64 rpage;
 886        struct ehca_mr_pginfo pginfo_save;
 887        struct ehca_mr_hipzout_parms hipzout;
 888
 889        ehca_mrmw_map_acl(acl, &hipz_acl);
 890        ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
 891
 892        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 893        if (!kpage) {
 894                ehca_err(&shca->ib_device, "kpage alloc failed");
 895                ret = -ENOMEM;
 896                goto ehca_rereg_mr_rereg1_exit0;
 897        }
 898
 899        pginfo_save = *pginfo;
 900        ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
 901        if (ret) {
 902                ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
 903                         "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
 904                         "kpage=%p", e_mr, pginfo, pginfo->type,
 905                         pginfo->num_kpages, pginfo->num_hwpages, kpage);
 906                goto ehca_rereg_mr_rereg1_exit1;
 907        }
 908        rpage = __pa(kpage);
 909        if (!rpage) {
 910                ehca_err(&shca->ib_device, "kpage=%p", kpage);
 911                ret = -EFAULT;
 912                goto ehca_rereg_mr_rereg1_exit1;
 913        }
 914        h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
 915                                      (u64)iova_start, size, hipz_acl,
 916                                      e_pd->fw_pd, rpage, &hipzout);
 917        if (h_ret != H_SUCCESS) {
 918                /*
 919                 * reregistration unsuccessful, try it again with the 3 hCalls,
 920                 * e.g. this is required in case H_MR_CONDITION
 921                 * (MW bound or MR is shared)
 922                 */
 923                ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
 924                          "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
 925                *pginfo = pginfo_save;
 926                ret = -EAGAIN;
 927        } else if ((u64 *)hipzout.vaddr != iova_start) {
 928                ehca_err(&shca->ib_device, "PHYP changed iova_start in "
 929                         "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
 930                         "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
 931                         hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
 932                         e_mr->ib.ib_mr.lkey, hipzout.lkey);
 933                ret = -EFAULT;
 934        } else {
 935                /*
 936                 * successful reregistration
 937                 * note: start and start_out are identical for eServer HCAs
 938                 */
 939                e_mr->num_kpages = pginfo->num_kpages;
 940                e_mr->num_hwpages = pginfo->num_hwpages;
 941                e_mr->hwpage_size = pginfo->hwpage_size;
 942                e_mr->start = iova_start;
 943                e_mr->size = size;
 944                e_mr->acl = acl;
 945                *lkey = hipzout.lkey;
 946                *rkey = hipzout.rkey;
 947        }
 948
 949ehca_rereg_mr_rereg1_exit1:
 950        ehca_free_fw_ctrlblock(kpage);
 951ehca_rereg_mr_rereg1_exit0:
 952        if (ret && (ret != -EAGAIN))
 953                ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
 954                         "pginfo=%p num_kpages=%llx num_hwpages=%llx",
 955                         ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
 956                         pginfo->num_hwpages);
 957        return ret;
 958} /* end ehca_rereg_mr_rereg1() */
 959
 960/*----------------------------------------------------------------------*/
 961
 962int ehca_rereg_mr(struct ehca_shca *shca,
 963                  struct ehca_mr *e_mr,
 964                  u64 *iova_start,
 965                  u64 size,
 966                  int acl,
 967                  struct ehca_pd *e_pd,
 968                  struct ehca_mr_pginfo *pginfo,
 969                  u32 *lkey,
 970                  u32 *rkey)
 971{
 972        int ret = 0;
 973        u64 h_ret;
 974        int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
 975        int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
 976
 977        /* first determine reregistration hCall(s) */
 978        if ((pginfo->num_hwpages > MAX_RPAGES) ||
 979            (e_mr->num_hwpages > MAX_RPAGES) ||
 980            (pginfo->num_hwpages > e_mr->num_hwpages)) {
 981                ehca_dbg(&shca->ib_device, "Rereg3 case, "
 982                         "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
 983                         pginfo->num_hwpages, e_mr->num_hwpages);
 984                rereg_1_hcall = 0;
 985                rereg_3_hcall = 1;
 986        }
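        /*
         * Rereg1 does the whole job with a single hipz_h_reregister_pmr call
         * and therefore needs all hw pages to fit into the one firmware
         * control block passed to it (at most MAX_RPAGES entries); otherwise
         * fall back to Rereg3: free the old MR and register it from scratch.
         */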
 987
 988        if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
 989                rereg_1_hcall = 0;
 990                rereg_3_hcall = 1;
 991                e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
 992                ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
 993                         e_mr);
 994        }
 995
 996        if (rereg_1_hcall) {
 997                ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
 998                                           acl, e_pd, pginfo, lkey, rkey);
 999                if (ret) {
1000                        if (ret == -EAGAIN)
1001                                rereg_3_hcall = 1;
1002                        else
1003                                goto ehca_rereg_mr_exit0;
1004                }
1005        }
1006
1007        if (rereg_3_hcall) {
1008                struct ehca_mr save_mr;
1009
1010                /* first deregister old MR */
1011                h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1012                if (h_ret != H_SUCCESS) {
1013                        ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1014                                 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
1015                                 "mr->lkey=%x",
1016                                 h_ret, e_mr, shca->ipz_hca_handle.handle,
1017                                 e_mr->ipz_mr_handle.handle,
1018                                 e_mr->ib.ib_mr.lkey);
1019                        ret = ehca2ib_return_code(h_ret);
1020                        goto ehca_rereg_mr_exit0;
1021                }
1022                /* clean ehca_mr_t, without changing struct ib_mr and lock */
1023                save_mr = *e_mr;
1024                ehca_mr_deletenew(e_mr);
1025
1026                /* set some MR values */
1027                e_mr->flags = save_mr.flags;
1028                e_mr->hwpage_size = save_mr.hwpage_size;
1029                e_mr->fmr_page_size = save_mr.fmr_page_size;
1030                e_mr->fmr_max_pages = save_mr.fmr_max_pages;
1031                e_mr->fmr_max_maps = save_mr.fmr_max_maps;
1032                e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
1033
1034                ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
1035                                  e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
1036                if (ret) {
1037                        u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
1038                        memcpy(&e_mr->flags, &(save_mr.flags),
1039                               sizeof(struct ehca_mr) - offset);
1040                        goto ehca_rereg_mr_exit0;
1041                }
1042        }
1043
1044ehca_rereg_mr_exit0:
1045        if (ret)
1046                ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
1047                         "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
1048                         "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
1049                         "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1050                         acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
1051                         rereg_1_hcall, rereg_3_hcall);
1052        return ret;
1053} /* end ehca_rereg_mr() */
1054
1055/*----------------------------------------------------------------------*/
1056
1057int ehca_unmap_one_fmr(struct ehca_shca *shca,
1058                       struct ehca_mr *e_fmr)
1059{
1060        int ret = 0;
1061        u64 h_ret;
1062        struct ehca_pd *e_pd =
1063                container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1064        struct ehca_mr save_fmr;
1065        u32 tmp_lkey, tmp_rkey;
1066        struct ehca_mr_pginfo pginfo;
1067        struct ehca_mr_hipzout_parms hipzout;
1069
1070        if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
1071                /*
1072                 * note: after using rereg hcall with len=0,
1073                 * rereg hcall must be used again for registering pages
1074                 */
1075                h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1076                                              0, 0, e_pd->fw_pd, 0, &hipzout);
1077                if (h_ret == H_SUCCESS) {
1078                        /* successful reregistration */
1079                        e_fmr->start = NULL;
1080                        e_fmr->size = 0;
1081                        tmp_lkey = hipzout.lkey;
1082                        tmp_rkey = hipzout.rkey;
1083                        return 0;
1084                }
1085                /*
1086                 * should not happen: the length was checked above, FMRs
1087                 * are not shared, and no MW can be bound to an FMR
1088                 */
1089                ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1090                         "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
1091                         "mr_hndl=%llx lkey=%x lkey_out=%x",
1092                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
1093                         e_fmr->ipz_mr_handle.handle,
1094                         e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1095                /* try free and rereg */
1096        }
1097
1098        /* first free old FMR */
1099        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1100        if (h_ret != H_SUCCESS) {
1101                ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1102                         "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
1103                         "lkey=%x",
1104                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
1105                         e_fmr->ipz_mr_handle.handle,
1106                         e_fmr->ib.ib_fmr.lkey);
1107                ret = ehca2ib_return_code(h_ret);
1108                goto ehca_unmap_one_fmr_exit0;
1109        }
1110        /* clean ehca_mr_t, without changing lock */
1111        save_fmr = *e_fmr;
1112        ehca_mr_deletenew(e_fmr);
1113
1114        /* set some MR values */
1115        e_fmr->flags = save_fmr.flags;
1116        e_fmr->hwpage_size = save_fmr.hwpage_size;
1117        e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1118        e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1119        e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1120        e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1121        e_fmr->acl = save_fmr.acl;
1122
1123        memset(&pginfo, 0, sizeof(pginfo));
1124        pginfo.type = EHCA_MR_PGI_FMR;
1125        ret = ehca_reg_mr(shca, e_fmr, NULL,
1126                          (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1127                          e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1128                          &tmp_rkey, EHCA_REG_MR);
1129        if (ret) {
1130                u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
1131                memcpy(&e_fmr->flags, &(save_fmr.flags),
1132                       sizeof(struct ehca_mr) - offset);
1133        }
1134
1135ehca_unmap_one_fmr_exit0:
1136        if (ret)
1137                ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
1138                         "fmr_max_pages=%x",
1139                         ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
1140        return ret;
1141} /* end ehca_unmap_one_fmr() */
1142
1143/*----------------------------------------------------------------------*/
1144
1145int ehca_reg_smr(struct ehca_shca *shca,
1146                 struct ehca_mr *e_origmr,
1147                 struct ehca_mr *e_newmr,
1148                 u64 *iova_start,
1149                 int acl,
1150                 struct ehca_pd *e_pd,
1151                 u32 *lkey, /*OUT*/
1152                 u32 *rkey) /*OUT*/
1153{
1154        int ret = 0;
1155        u64 h_ret;
1156        u32 hipz_acl;
1157        struct ehca_mr_hipzout_parms hipzout;
1158
1159        ehca_mrmw_map_acl(acl, &hipz_acl);
1160        ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
1161
1162        h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1163                                    (u64)iova_start, hipz_acl, e_pd->fw_pd,
1164                                    &hipzout);
1165        if (h_ret != H_SUCCESS) {
1166                ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1167                         "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1168                         "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1169                         h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1170                         shca->ipz_hca_handle.handle,
1171                         e_origmr->ipz_mr_handle.handle,
1172                         e_origmr->ib.ib_mr.lkey);
1173                ret = ehca2ib_return_code(h_ret);
1174                goto ehca_reg_smr_exit0;
1175        }
1176        /* successful registration */
1177        e_newmr->num_kpages = e_origmr->num_kpages;
1178        e_newmr->num_hwpages = e_origmr->num_hwpages;
1179        e_newmr->hwpage_size   = e_origmr->hwpage_size;
1180        e_newmr->start = iova_start;
1181        e_newmr->size = e_origmr->size;
1182        e_newmr->acl = acl;
1183        e_newmr->ipz_mr_handle = hipzout.handle;
1184        *lkey = hipzout.lkey;
1185        *rkey = hipzout.rkey;
1186        return 0;
1187
1188ehca_reg_smr_exit0:
1189        if (ret)
1190                ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
1191                         "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1192                         ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1193        return ret;
1194} /* end ehca_reg_smr() */
1195
1196/*----------------------------------------------------------------------*/
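/*
 * Rebuild a section base address from its (top, dir, idx) indices: the
 * indices are concatenated (13 bits each for idx and dir, cf.
 * EHCA_DIR_INDEX_SHIFT and EHCA_TOP_INDEX_SHIFT), shifted up by
 * SECTION_SIZE_BITS and converted to a kernel virtual address with __va().
 */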
1197static inline void *ehca_calc_sectbase(int top, int dir, int idx)
1198{
1199        unsigned long ret = idx;
1200        ret |= dir << EHCA_DIR_INDEX_SHIFT;
1201        ret |= top << EHCA_TOP_INDEX_SHIFT;
1202        return __va(ret << SECTION_SIZE_BITS);
1203}
1204
1205#define ehca_bmap_valid(entry) \
1206        ((u64)entry != (u64)EHCA_INVAL_ADDR)
1207
1208static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
1209                               struct ehca_shca *shca, struct ehca_mr *mr,
1210                               struct ehca_mr_pginfo *pginfo)
1211{
1212        u64 h_ret = 0;
1213        unsigned long page = 0;
1214        u64 rpage = __pa(kpage);
1215        int page_count;
1216
1217        void *sectbase = ehca_calc_sectbase(top, dir, idx);
1218        if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
1219                ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
1220                                           "hwpage_size does not match the "
1221                                           "section start address");
1222        }
1223        page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
1224
1225        while (page < page_count) {
1226                u64 rnum;
1227                for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
1228                     rnum++) {
1229                        void *pg = sectbase + ((page++) * pginfo->hwpage_size);
1230                        kpage[rnum] = __pa(pg);
1231                }
1232
1233                h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
1234                        ehca_encode_hwpage_size(pginfo->hwpage_size),
1235                        0, rpage, rnum);
1236
1237                if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
1238                        ehca_err(&shca->ib_device, "register_rpage_mr failed");
1239                        return h_ret;
1240                }
1241        }
1242        return h_ret;
1243}
1244
1245static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
1246                                struct ehca_shca *shca, struct ehca_mr *mr,
1247                                struct ehca_mr_pginfo *pginfo)
1248{
1249        u64 hret = H_SUCCESS;
1250        int idx;
1251
1252        for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
1253                if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
1254                        continue;
1255
1256                hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
1257                                           pginfo);
1258                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
1259                        return hret;
1260        }
1261        return hret;
1262}
1263
1264static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
1265                                    struct ehca_mr *mr,
1266                                    struct ehca_mr_pginfo *pginfo)
1267{
1268        u64 hret = H_SUCCESS;
1269        int dir;
1270
1271        for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
1272                if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
1273                        continue;
1274
1275                hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
1276                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
1277                        return hret;
1278        }
1279        return hret;
1280}
1281
1282/* register internal max-MR to internal SHCA */
1283int ehca_reg_internal_maxmr(
1284        struct ehca_shca *shca,
1285        struct ehca_pd *e_pd,
1286        struct ehca_mr **e_maxmr)  /*OUT*/
1287{
1288        int ret;
1289        struct ehca_mr *e_mr;
1290        u64 *iova_start;
1291        u64 size_maxmr;
1292        struct ehca_mr_pginfo pginfo;
1293        u32 num_kpages;
1294        u32 num_hwpages;
1295        u64 hw_pgsize;
1296
1297        if (!ehca_bmap) {
1298                ret = -EFAULT;
1299                goto ehca_reg_internal_maxmr_exit0;
1300        }
1301
1302        e_mr = ehca_mr_new();
1303        if (!e_mr) {
1304                ehca_err(&shca->ib_device, "out of memory");
1305                ret = -ENOMEM;
1306                goto ehca_reg_internal_maxmr_exit0;
1307        }
1308        e_mr->flags |= EHCA_MR_FLAG_MAXMR;
1309
1310        /* register internal max-MR on HCA */
1311        size_maxmr = ehca_mr_len;
1312        iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
1313        num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
1314                                PAGE_SIZE);
1315        hw_pgsize = ehca_get_max_hwpage_size(shca);
1316        num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
1317                                 hw_pgsize);
1318
1319        memset(&pginfo, 0, sizeof(pginfo));
1320        pginfo.type = EHCA_MR_PGI_PHYS;
1321        pginfo.num_kpages = num_kpages;
1322        pginfo.num_hwpages = num_hwpages;
1323        pginfo.hwpage_size = hw_pgsize;
1324        pginfo.u.phy.addr = 0;
1325        pginfo.u.phy.size = size_maxmr;
1326
1327        ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1328                          &pginfo, &e_mr->ib.ib_mr.lkey,
1329                          &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
1330        if (ret) {
1331                ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1332                         "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
1333                         "num_hwpages=%x", e_mr, iova_start, size_maxmr,
1334                         num_kpages, num_hwpages);
1335                goto ehca_reg_internal_maxmr_exit1;
1336        }
1337
1338        /* successful registration of all pages */
1339        e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
1340        e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
1341        e_mr->ib.ib_mr.uobject = NULL;
1342        atomic_inc(&(e_pd->ib_pd.usecnt));
1343        *e_maxmr = e_mr;
1344        return 0;
1345
1346ehca_reg_internal_maxmr_exit1:
1347        ehca_mr_delete(e_mr);
1348ehca_reg_internal_maxmr_exit0:
1349        if (ret)
1350                ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
1351                         ret, shca, e_pd, e_maxmr);
1352        return ret;
1353} /* end ehca_reg_internal_maxmr() */
1354
1355/*----------------------------------------------------------------------*/
1356
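/* register a new max-MR as a shared MR on top of the internal max-MR */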
1357int ehca_reg_maxmr(struct ehca_shca *shca,
1358                   struct ehca_mr *e_newmr,
1359                   u64 *iova_start,
1360                   int acl,
1361                   struct ehca_pd *e_pd,
1362                   u32 *lkey,
1363                   u32 *rkey)
1364{
1365        u64 h_ret;
1366        struct ehca_mr *e_origmr = shca->maxmr;
1367        u32 hipz_acl;
1368        struct ehca_mr_hipzout_parms hipzout;
1369
1370        ehca_mrmw_map_acl(acl, &hipz_acl);
1371        ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
1372
1373        h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1374                                    (u64)iova_start, hipz_acl, e_pd->fw_pd,
1375                                    &hipzout);
1376        if (h_ret != H_SUCCESS) {
1377                ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1378                         "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1379                         h_ret, e_origmr, shca->ipz_hca_handle.handle,
1380                         e_origmr->ipz_mr_handle.handle,
1381                         e_origmr->ib.ib_mr.lkey);
1382                return ehca2ib_return_code(h_ret);
1383        }
1384        /* successful registration */
1385        e_newmr->num_kpages = e_origmr->num_kpages;
1386        e_newmr->num_hwpages = e_origmr->num_hwpages;
1387        e_newmr->hwpage_size = e_origmr->hwpage_size;
1388        e_newmr->start = iova_start;
1389        e_newmr->size = e_origmr->size;
1390        e_newmr->acl = acl;
1391        e_newmr->ipz_mr_handle = hipzout.handle;
1392        *lkey = hipzout.lkey;
1393        *rkey = hipzout.rkey;
1394        return 0;
1395} /* end ehca_reg_maxmr() */
1396
1397/*----------------------------------------------------------------------*/
1398
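/* deregister the internal max-MR and drop its PD reference */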
1399int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1400{
1401        int ret;
1402        struct ehca_mr *e_maxmr;
1403        struct ib_pd *ib_pd;
1404
1405        if (!shca->maxmr) {
1406                ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1407                ret = -EINVAL;
1408                goto ehca_dereg_internal_maxmr_exit0;
1409        }
1410
1411        e_maxmr = shca->maxmr;
1412        ib_pd = e_maxmr->ib.ib_mr.pd;
1413        shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1414
1415        ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1416        if (ret) {
1417                ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1418                         "ret=%i e_maxmr=%p shca=%p lkey=%x",
1419                         ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1420                shca->maxmr = e_maxmr;
1421                goto ehca_dereg_internal_maxmr_exit0;
1422        }
1423
1424        atomic_dec(&ib_pd->usecnt);
1425
1426ehca_dereg_internal_maxmr_exit0:
1427        if (ret)
1428                ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
1429                         ret, shca, shca->maxmr);
1430        return ret;
1431} /* end ehca_dereg_internal_maxmr() */
1432
1433/*----------------------------------------------------------------------*/
1434
1435/* check the page list passed to the map FMR verb for validity */
1436int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1437                             u64 *page_list,
1438                             int list_len)
1439{
1440        u32 i;
1441        u64 *page;
1442
1443        if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1444                ehca_gen_err("bad list_len, list_len=%x "
1445                             "e_fmr->fmr_max_pages=%x fmr=%p",
1446                             list_len, e_fmr->fmr_max_pages, e_fmr);
1447                return -EINVAL;
1448        }
1449
1450        /* each page must be aligned */
1451        page = page_list;
1452        for (i = 0; i < list_len; i++) {
1453                if (*page % e_fmr->fmr_page_size) {
1454                        ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
1455                                     "fmr_page_size=%x", i, *page, page, e_fmr,
1456                                     e_fmr->fmr_page_size);
1457                        return -EINVAL;
1458                }
1459                page++;
1460        }
1461
1462        return 0;
1463} /* end ehca_fmr_check_page_list() */
1464
1465/*----------------------------------------------------------------------*/
1466
1467/* PAGE_SIZE >= pginfo->hwpage_size */
1468static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1469                                  u32 number,
1470                                  u64 *kpage)
1471{
1472        int ret = 0;
1473        u64 pgaddr;
1474        u32 j = 0;
1475        int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
1476        struct scatterlist **sg = &pginfo->u.usr.next_sg;
1477
1478        while (*sg != NULL) {
1479                pgaddr = page_to_pfn(sg_page(*sg))
1480                        << PAGE_SHIFT;
1481                *kpage = pgaddr + (pginfo->next_hwpage *
1482                                   pginfo->hwpage_size);
1483                if (!(*kpage)) {
1484                        ehca_gen_err("pgaddr=%llx "
1485                                     "sg_dma_address=%llx "
1486                                     "entry=%llx next_hwpage=%llx",
1487                                     pgaddr, (u64)sg_dma_address(*sg),
1488                                     pginfo->u.usr.next_nmap,
1489                                     pginfo->next_hwpage);
1490                        return -EFAULT;
1491                }
1492                (pginfo->hwpage_cnt)++;
1493                (pginfo->next_hwpage)++;
1494                kpage++;
1495                if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
1496                        (pginfo->kpage_cnt)++;
1497                        (pginfo->u.usr.next_nmap)++;
1498                        pginfo->next_hwpage = 0;
1499                        *sg = sg_next(*sg);
1500                }
1501                j++;
1502                if (j >= number)
1503                        break;
1504        }
1505
1506        return ret;
1507}
1508
1509/*
1510 * check that the given pages are laid out contiguously;
1511 * the last page address is returned in prev_pgaddr for further checks
1512 */
1513static int ehca_check_kpages_per_ate(struct scatterlist **sg,
1514                                     int num_pages,
1515                                     u64 *prev_pgaddr)
1516{
1517        for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
1518                u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
1519                if (ehca_debug_level >= 3)
1520                        ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
1521                                     *(u64 *)__va(pgaddr));
1522                if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
1523                        ehca_gen_err("uncontiguous page found pgaddr=%llx "
1524                                     "prev_pgaddr=%llx entries_left_in_hwpage=%x",
1525                                     pgaddr, *prev_pgaddr, num_pages);
1526                        return -EINVAL;
1527                }
1528                *prev_pgaddr = pgaddr;
1529        }
1530        return 0;
1531}
1532
1533/* PAGE_SIZE < pginfo->hwpage_size */
1534static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1535                                  u32 number,
1536                                  u64 *kpage)
1537{
1538        int ret = 0;
1539        u64 pgaddr, prev_pgaddr;
1540        u32 j = 0;
1541        int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
1542        int nr_kpages = kpages_per_hwpage;
1543        struct scatterlist **sg = &pginfo->u.usr.next_sg;
1544
1545        while (*sg != NULL) {
1546
1547                if (nr_kpages == kpages_per_hwpage) {
1548                        pgaddr = (page_to_pfn(sg_page(*sg))
1549                                   << PAGE_SHIFT);
1550                        *kpage = pgaddr;
1551                        if (!(*kpage)) {
1552                                ehca_gen_err("pgaddr=%llx entry=%llx",
1553                                             pgaddr, pginfo->u.usr.next_nmap);
1554                                ret = -EFAULT;
1555                                return ret;
1556                        }
1557                        /*
1558                         * The first page in a hwpage must be aligned;
1559                         * the first MR page is exempt from this rule.
1560                         */
1561                        if (pgaddr & (pginfo->hwpage_size - 1)) {
1562                                if (pginfo->hwpage_cnt) {
1563                                        ehca_gen_err(
1564                                                "invalid alignment "
1565                                                "pgaddr=%llx entry=%llx "
1566                                                "mr_pgsize=%llx",
1567                                                pgaddr, pginfo->u.usr.next_nmap,
1568                                                pginfo->hwpage_size);
1569                                        ret = -EFAULT;
1570                                        return ret;
1571                                }
1572                                /* first MR page */
1573                                pginfo->kpage_cnt =
1574                                        (pgaddr &
1575                                         (pginfo->hwpage_size - 1)) >>
1576                                        PAGE_SHIFT;
1577                                nr_kpages -= pginfo->kpage_cnt;
1578                                *kpage = pgaddr &
1579                                         ~(pginfo->hwpage_size - 1);
1580                        }
1581                        if (ehca_debug_level >= 3) {
1582                                u64 val = *(u64 *)__va(pgaddr);
1583                                ehca_gen_dbg("kpage=%llx page=%llx "
1584                                             "value=%016llx",
1585                                             *kpage, pgaddr, val);
1586                        }
1587                        prev_pgaddr = pgaddr;
1588                        *sg = sg_next(*sg);
1589                        pginfo->kpage_cnt++;
1590                        pginfo->u.usr.next_nmap++;
1591                        nr_kpages--;
1592                        if (!nr_kpages)
1593                                goto next_kpage;
1594                        continue;
1595                }
1596
1597                ret = ehca_check_kpages_per_ate(sg, nr_kpages,
1598                                                &prev_pgaddr);
1599                if (ret)
1600                        return ret;
1601                pginfo->kpage_cnt += nr_kpages;
1602                pginfo->u.usr.next_nmap += nr_kpages;
1603
1604next_kpage:
1605                nr_kpages = kpages_per_hwpage;
1606                (pginfo->hwpage_cnt)++;
1607                kpage++;
1608                j++;
1609                if (j >= number)
1610                        break;
1611        }
1612
1613        return ret;
1614}
1615
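/*
 * fill the kpage buffer with hwpage addresses of a physically contiguous
 * region described by pginfo->u.phy (EHCA_MR_PGI_PHYS)
 */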
1616static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
1617                                 u32 number, u64 *kpage)
1618{
1619        int ret = 0;
1620        u64 addr = pginfo->u.phy.addr;
1621        u64 size = pginfo->u.phy.size;
1622        u64 num_hw, offs_hw;
1623        u32 i = 0;
1624
1625        num_hw  = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
1626                                pginfo->hwpage_size);
1627        offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;
1628
1629        while (pginfo->next_hwpage < offs_hw + num_hw) {
1630                /* sanity check */
1631                if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1632                    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1633                        ehca_gen_err("kpage_cnt >= num_kpages, "
1634                                     "kpage_cnt=%llx num_kpages=%llx "
1635                                     "hwpage_cnt=%llx "
1636                                     "num_hwpages=%llx i=%x",
1637                                     pginfo->kpage_cnt,
1638                                     pginfo->num_kpages,
1639                                     pginfo->hwpage_cnt,
1640                                     pginfo->num_hwpages, i);
1641                        return -EFAULT;
1642                }
1643                *kpage = (addr & ~(pginfo->hwpage_size - 1)) +
1644                         (pginfo->next_hwpage * pginfo->hwpage_size);
1645                if (!(*kpage) && addr) {
1646                        ehca_gen_err("addr=%llx size=%llx "
1647                                     "next_hwpage=%llx", addr,
1648                                     size, pginfo->next_hwpage);
1649                        return -EFAULT;
1650                }
1651                (pginfo->hwpage_cnt)++;
1652                (pginfo->next_hwpage)++;
1653                if (PAGE_SIZE >= pginfo->hwpage_size) {
1654                        if (pginfo->next_hwpage %
1655                            (PAGE_SIZE / pginfo->hwpage_size) == 0)
1656                                (pginfo->kpage_cnt)++;
1657                } else
1658                        pginfo->kpage_cnt += pginfo->hwpage_size /
1659                                PAGE_SIZE;
1660                kpage++;
1661                i++;
1662                if (i >= number) break;
1663        }
1664        if (pginfo->next_hwpage >= offs_hw + num_hw) {
1665                pginfo->next_hwpage = 0;
1666        }
1667
1668        return ret;
1669}
1670
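/*
 * fill the kpage buffer from the FMR page list; if the FMR page size is
 * smaller than the hwpage size, verify that the FMR pages forming one
 * hwpage are contiguous
 */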
1671static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
1672                                u32 number, u64 *kpage)
1673{
1674        int ret = 0;
1675        u64 *fmrlist;
1676        u32 i;
1677
1678        /* loop over desired page_list entries */
1679        fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
1680        for (i = 0; i < number; i++) {
1681                *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
1682                           pginfo->next_hwpage * pginfo->hwpage_size;
1683                if (!(*kpage)) {
1684                        ehca_gen_err("*fmrlist=%llx fmrlist=%p "
1685                                     "next_listelem=%llx next_hwpage=%llx",
1686                                     *fmrlist, fmrlist,
1687                                     pginfo->u.fmr.next_listelem,
1688                                     pginfo->next_hwpage);
1689                        return -EFAULT;
1690                }
1691                (pginfo->hwpage_cnt)++;
1692                if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
1693                        if (pginfo->next_hwpage %
1694                            (pginfo->u.fmr.fmr_pgsize /
1695                             pginfo->hwpage_size) == 0) {
1696                                (pginfo->kpage_cnt)++;
1697                                (pginfo->u.fmr.next_listelem)++;
1698                                fmrlist++;
1699                                pginfo->next_hwpage = 0;
1700                        } else
1701                                (pginfo->next_hwpage)++;
1702                } else {
1703                        unsigned int cnt_per_hwpage = pginfo->hwpage_size /
1704                                pginfo->u.fmr.fmr_pgsize;
1705                        unsigned int j;
1706                        u64 prev = *kpage;
1707                        /* check if adrs are contiguous */
1708                        for (j = 1; j < cnt_per_hwpage; j++) {
1709                                u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
1710                                if (prev + pginfo->u.fmr.fmr_pgsize != p) {
1711                                        ehca_gen_err("uncontiguous fmr pages "
1712                                                     "found prev=%llx p=%llx "
1713                                                     "idx=%x", prev, p, i + j);
1714                                        return -EINVAL;
1715                                }
1716                                prev = p;
1717                        }
1718                        pginfo->kpage_cnt += cnt_per_hwpage;
1719                        pginfo->u.fmr.next_listelem += cnt_per_hwpage;
1720                        fmrlist += cnt_per_hwpage;
1721                }
1722                kpage++;
1723        }
1724        return ret;
1725}
1726
1727/* setup page buffer from page info */
1728int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
1729                     u32 number,
1730                     u64 *kpage)
1731{
1732        int ret;
1733
1734        switch (pginfo->type) {
1735        case EHCA_MR_PGI_PHYS:
1736                ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
1737                break;
1738        case EHCA_MR_PGI_USER:
1739                ret = PAGE_SIZE >= pginfo->hwpage_size ?
1740                        ehca_set_pagebuf_user1(pginfo, number, kpage) :
1741                        ehca_set_pagebuf_user2(pginfo, number, kpage);
1742                break;
1743        case EHCA_MR_PGI_FMR:
1744                ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
1745                break;
1746        default:
1747                ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1748                ret = -EFAULT;
1749                break;
1750        }
1751        return ret;
1752} /* end ehca_set_pagebuf() */
1753
1754/*----------------------------------------------------------------------*/
1755
1756/*
1757 * check whether an MR is a max-MR, i.e. whether it spans all of memory;
1758 * returns 1 if it is a max-MR, else 0
1759 */
1760int ehca_mr_is_maxmr(u64 size,
1761                     u64 *iova_start)
1762{
1763        /* an MR is treated as a max-MR only if it meets the following: */
1764        if ((size == ehca_mr_len) &&
1765            (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
1766                ehca_gen_dbg("this is a max-MR");
1767                return 1;
1768        }
1769        return 0;
1770} /* end ehca_mr_is_maxmr() */
1771
1772/*----------------------------------------------------------------------*/
1773
1774/* map access control for MR/MW. This routine is used for MR and MW. */
1775void ehca_mrmw_map_acl(int ib_acl,
1776                       u32 *hipz_acl)
1777{
1778        *hipz_acl = 0;
1779        if (ib_acl & IB_ACCESS_REMOTE_READ)
1780                *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
1781        if (ib_acl & IB_ACCESS_REMOTE_WRITE)
1782                *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
1783        if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
1784                *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
1785        if (ib_acl & IB_ACCESS_LOCAL_WRITE)
1786                *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
1787        if (ib_acl & IB_ACCESS_MW_BIND)
1788                *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
1789} /* end ehca_mrmw_map_acl() */
1790
1791/*----------------------------------------------------------------------*/
1792
1793/* sets page size in hipz access control for MR/MW. */
1794void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
1795{
1796        *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
1797} /* end ehca_mrmw_set_pgsize_hipz_acl() */
1798
1799/*----------------------------------------------------------------------*/
1800
1801/*
1802 * reverse map access control for MR/MW.
1803 * This routine is used for MR and MW.
1804 */
1805void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
1806                               int *ib_acl) /*OUT*/
1807{
1808        *ib_acl = 0;
1809        if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
1810                *ib_acl |= IB_ACCESS_REMOTE_READ;
1811        if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
1812                *ib_acl |= IB_ACCESS_REMOTE_WRITE;
1813        if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
1814                *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
1815        if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
1816                *ib_acl |= IB_ACCESS_LOCAL_WRITE;
1817        if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
1818                *ib_acl |= IB_ACCESS_MW_BIND;
1819} /* end ehca_mrmw_reverse_map_acl() */
1820
1821
1822/*----------------------------------------------------------------------*/
1823
1824/*
1825 * MR destructor and constructor
1826 * used in the reregister MR verb; sets all fields of struct ehca_mr to 0,
1827 * except struct ib_mr and the spinlock
1828 */
1829void ehca_mr_deletenew(struct ehca_mr *mr)
1830{
1831        mr->flags = 0;
1832        mr->num_kpages = 0;
1833        mr->num_hwpages = 0;
1834        mr->acl = 0;
1835        mr->start = NULL;
1836        mr->fmr_page_size = 0;
1837        mr->fmr_max_pages = 0;
1838        mr->fmr_max_maps = 0;
1839        mr->fmr_map_cnt = 0;
1840        memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
1841        memset(&mr->galpas, 0, sizeof(mr->galpas));
1842} /* end ehca_mr_deletenew() */
1843
1844int ehca_init_mrmw_cache(void)
1845{
1846        mr_cache = kmem_cache_create("ehca_cache_mr",
1847                                     sizeof(struct ehca_mr), 0,
1848                                     SLAB_HWCACHE_ALIGN,
1849                                     NULL);
1850        if (!mr_cache)
1851                return -ENOMEM;
1852        mw_cache = kmem_cache_create("ehca_cache_mw",
1853                                     sizeof(struct ehca_mw), 0,
1854                                     SLAB_HWCACHE_ALIGN,
1855                                     NULL);
1856        if (!mw_cache) {
1857                kmem_cache_destroy(mr_cache);
1858                mr_cache = NULL;
1859                return -ENOMEM;
1860        }
1861        return 0;
1862}
1863
1864void ehca_cleanup_mrmw_cache(void)
1865{
1866        if (mr_cache)
1867                kmem_cache_destroy(mr_cache);
1868        if (mw_cache)
1869                kmem_cache_destroy(mw_cache);
1870}
1871
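/*
 * allocate a directory block of the bus map on demand and mark all of its
 * entries invalid
 */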
1872static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
1873                                     int dir)
1874{
1875        if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
1876                ehca_top_bmap->dir[dir] =
1877                        kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
1878                if (!ehca_top_bmap->dir[dir])
1879                        return -ENOMEM;
1880                /* Set block to 0xFF, i.e. all entries to EHCA_INVAL_ADDR */
1881                memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
1882        }
1883        return 0;
1884}
1885
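/*
 * allocate the top-level block for 'top' on demand, then the directory
 * block for 'dir' below it
 */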
1886static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
1887{
1888        if (!ehca_bmap_valid(ehca_bmap->top[top])) {
1889                ehca_bmap->top[top] =
1890                        kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
1891                if (!ehca_bmap->top[top])
1892                        return -ENOMEM;
1893                /* Set block to 0xFF, i.e. all entries to EHCA_INVAL_ADDR */
1894                memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
1895        }
1896        return ehca_init_top_bmap(ehca_bmap->top[top], dir);
1897}
1898
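/*
 * extract a bus map index from a section number or address: shift right
 * by s bits and mask with EHCA_INDEX_MASK
 */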
1899static inline int ehca_calc_index(unsigned long i, unsigned long s)
1900{
1901        return (i >> s) & EHCA_INDEX_MASK;
1902}
1903
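/* free all directory and top-level blocks of the bus map */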
1904void ehca_destroy_busmap(void)
1905{
1906        int top, dir;
1907
1908        if (!ehca_bmap)
1909                return;
1910
1911        for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
1912                if (!ehca_bmap_valid(ehca_bmap->top[top]))
1913                        continue;
1914                for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
1915                        if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
1916                                continue;
1917
1918                        kfree(ehca_bmap->top[top]->dir[dir]);
1919                }
1920
1921                kfree(ehca_bmap->top[top]);
1922        }
1923
1924        kfree(ehca_bmap);
1925        ehca_bmap = NULL;
1926}
1927
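/*
 * add a physically contiguous page range to the bus map; each EHCA_SECTSIZE
 * section is assigned the next free bus address, so registered memory maps
 * onto a dense bus address space starting at 0
 */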
1928static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
1929{
1930        unsigned long i, start_section, end_section;
1931        int top, dir, idx;
1932
1933        if (!nr_pages)
1934                return 0;
1935
1936        if (!ehca_bmap) {
1937                ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
1938                if (!ehca_bmap)
1939                        return -ENOMEM;
1940                /* Set block to 0xFF, i.e. all entries to EHCA_INVAL_ADDR */
1941                memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
1942        }
1943
1944        start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
1945        end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
1946        for (i = start_section; i < end_section; i++) {
1947                int ret;
1948                top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
1949                dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
1950                idx = i & EHCA_INDEX_MASK;
1951
1952                ret = ehca_init_bmap(ehca_bmap, top, dir);
1953                if (ret) {
1954                        ehca_destroy_busmap();
1955                        return ret;
1956                }
1957                ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
1958                ehca_mr_len += EHCA_SECTSIZE;
1959        }
1960        return 0;
1961}
1962
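/*
 * return 1 if pfn is the first page of a 16GB hugepage, i.e. it is aligned
 * to EHCA_HUGEPAGE_SIZE and its compound page order matches
 * EHCA_HUGEPAGESHIFT
 */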
1963static int ehca_is_hugepage(unsigned long pfn)
1964{
1965        int page_order;
1966
1967        if (pfn & EHCA_HUGEPAGE_PFN_MASK)
1968                return 0;
1969
1970        page_order = compound_order(pfn_to_page(pfn));
1971        if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
1972                return 0;
1973
1974        return 1;
1975}
1976
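/*
 * callback for walk_system_ram_range(): add a chunk of system RAM to the
 * bus map, leaving out any 16GB hugepages contained in it
 */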
1977static int ehca_create_busmap_callback(unsigned long initial_pfn,
1978                                       unsigned long total_nr_pages, void *arg)
1979{
1980        int ret;
1981        unsigned long pfn, start_pfn, end_pfn, nr_pages;
1982
1983        if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
1984                return ehca_update_busmap(initial_pfn, total_nr_pages);
1985
1986        /* Given chunk is >= 16GB -> check for hugepages */
1987        start_pfn = initial_pfn;
1988        end_pfn = initial_pfn + total_nr_pages;
1989        pfn = start_pfn;
1990
1991        while (pfn < end_pfn) {
1992                if (ehca_is_hugepage(pfn)) {
1993                        /* Add mem found in front of the hugepage */
1994                        nr_pages = pfn - start_pfn;
1995                        ret = ehca_update_busmap(start_pfn, nr_pages);
1996                        if (ret)
1997                                return ret;
1998                        /* Skip the hugepage */
1999                        pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
2000                        start_pfn = pfn;
2001                } else
2002                        pfn += (EHCA_SECTSIZE / PAGE_SIZE);
2003        }
2004
2005        /* Add mem found after the hugepage(s) */
2006        nr_pages = pfn - start_pfn;
2007        return ehca_update_busmap(start_pfn, nr_pages);
2008}
2009
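/* build the bus map covering all system RAM below MAX_PHYSMEM_BITS */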
2010int ehca_create_busmap(void)
2011{
2012        int ret;
2013
2014        ehca_mr_len = 0;
2015        ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
2016                                   ehca_create_busmap_callback);
2017        return ret;
2018}
2019
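/*
 * register with the HCA the resource pages of every valid top-level block
 * of the bus map for the given MR
 */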
2020static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
2021                                   struct ehca_mr *e_mr,
2022                                   struct ehca_mr_pginfo *pginfo)
2023{
2024        int top;
2025        u64 hret = H_SUCCESS, *kpage;
2026
2027        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
2028        if (!kpage) {
2029                ehca_err(&shca->ib_device, "kpage alloc failed");
2030                return -ENOMEM;
2031        }
2032        for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
2033                if (!ehca_bmap_valid(ehca_bmap->top[top]))
2034                        continue;
2035                hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
2036                if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
2037                        break;
2038        }
2039
2040        ehca_free_fw_ctrlblock(kpage);
2041
2042        if (hret == H_SUCCESS)
2043                return 0; /* Everything is fine */
2044        else {
2045                ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
2046                                 "h_ret=%lli e_mr=%p top=%x lkey=%x "
2047                                 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
2048                                 e_mr->ib.ib_mr.lkey,
2049                                 shca->ipz_hca_handle.handle,
2050                                 e_mr->ipz_mr_handle.handle);
2051                return ehca2ib_return_code(hret);
2052        }
2053}
2054
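/*
 * translate a kernel virtual address into its bus address via the bus map;
 * returns EHCA_INVAL_ADDR if the address is not covered by the map
 */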
2055static u64 ehca_map_vaddr(void *caddr)
2056{
2057        int top, dir, idx;
2058        unsigned long abs_addr, offset;
2059        u64 entry;
2060
2061        if (!ehca_bmap)
2062                return EHCA_INVAL_ADDR;
2063
2064        abs_addr = __pa(caddr);
2065        top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
2066        if (!ehca_bmap_valid(ehca_bmap->top[top]))
2067                return EHCA_INVAL_ADDR;
2068
2069        dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
2070        if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
2071                return EHCA_INVAL_ADDR;
2072
2073        idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
2074
2075        entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
2076        if (ehca_bmap_valid(entry)) {
2077                offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
2078                return entry | offset;
2079        } else
2080                return EHCA_INVAL_ADDR;
2081}
2082
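/*
 * ib_dma_mapping_ops implementation: "DMA" addresses handed to the HCA are
 * bus map addresses produced by ehca_map_vaddr(), so mapping is a pure
 * software translation and the unmap functions are no-ops
 */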
2083static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2084{
2085        return dma_addr == EHCA_INVAL_ADDR;
2086}
2087
2088static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
2089                               size_t size, enum dma_data_direction direction)
2090{
2091        if (cpu_addr)
2092                return ehca_map_vaddr(cpu_addr);
2093        else
2094                return EHCA_INVAL_ADDR;
2095}
2096
2097static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
2098                                  enum dma_data_direction direction)
2099{
2100        /* This is only a stub; nothing to be done here */
2101}
2102
2103static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
2104                             unsigned long offset, size_t size,
2105                             enum dma_data_direction direction)
2106{
2107        u64 addr;
2108
2109        if (offset + size > PAGE_SIZE)
2110                return EHCA_INVAL_ADDR;
2111
2112        addr = ehca_map_vaddr(page_address(page));
2113        if (!ehca_dma_mapping_error(dev, addr))
2114                addr += offset;
2115
2116        return addr;
2117}
2118
2119static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
2120                                enum dma_data_direction direction)
2121{
2122        /* This is only a stub; nothing to be done here */
2123}
2124
2125static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
2126                           int nents, enum dma_data_direction direction)
2127{
2128        struct scatterlist *sg;
2129        int i;
2130
2131        for_each_sg(sgl, sg, nents, i) {
2132                u64 addr;
2133                addr = ehca_map_vaddr(sg_virt(sg));
2134                if (ehca_dma_mapping_error(dev, addr))
2135                        return 0;
2136
2137                sg->dma_address = addr;
2138                sg->dma_length = sg->length;
2139        }
2140        return nents;
2141}
2142
2143static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
2144                              int nents, enum dma_data_direction direction)
2145{
2146        /* This is only a stub; nothing to be done here */
2147}
2148
2149static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
2150                                         size_t size,
2151                                         enum dma_data_direction dir)
2152{
2153        dma_sync_single_for_cpu(dev->dev.parent, addr, size, dir);
2154}
2155
2156static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
2157                                            size_t size,
2158                                            enum dma_data_direction dir)
2159{
2160        dma_sync_single_for_device(dev->dev.parent, addr, size, dir);
2161}
2162
2163static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
2164                                     u64 *dma_handle, gfp_t flag)
2165{
2166        struct page *p;
2167        void *addr = NULL;
2168        u64 dma_addr;
2169
2170        p = alloc_pages(flag, get_order(size));
2171        if (p) {
2172                addr = page_address(p);
2173                dma_addr = ehca_map_vaddr(addr);
2174                if (ehca_dma_mapping_error(dev, dma_addr)) {
2175                        free_pages((unsigned long)addr, get_order(size));
2176                        return NULL;
2177                }
2178                if (dma_handle)
2179                        *dma_handle = dma_addr;
2180                return addr;
2181        }
2182        return NULL;
2183}
2184
2185static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
2186                                   void *cpu_addr, u64 dma_handle)
2187{
2188        if (cpu_addr && size)
2189                free_pages((unsigned long)cpu_addr, get_order(size));
2190}
2191
2192
2193struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
2194        .mapping_error          = ehca_dma_mapping_error,
2195        .map_single             = ehca_dma_map_single,
2196        .unmap_single           = ehca_dma_unmap_single,
2197        .map_page               = ehca_dma_map_page,
2198        .unmap_page             = ehca_dma_unmap_page,
2199        .map_sg                 = ehca_dma_map_sg,
2200        .unmap_sg               = ehca_dma_unmap_sg,
2201        .sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
2202        .sync_single_for_device = ehca_dma_sync_single_for_device,
2203        .alloc_coherent         = ehca_dma_alloc_coherent,
2204        .free_coherent          = ehca_dma_free_coherent,
2205};
2206