linux/drivers/infiniband/hw/ehca/ehca_uverbs.c
/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  userspace support verbs
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"
#include "hcp_if.h"

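/*
 * A ucontext carries no eHCA-private state beyond the embedded
 * struct ib_ucontext, so allocation and deallocation are trivial.
 */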
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
                                        struct ib_udata *udata)
{
        struct ehca_ucontext *my_context;

        my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
        if (!my_context) {
                ehca_err(device, "Out of memory device=%p", device);
                return ERR_PTR(-ENOMEM);
        }

        return &my_context->ib_ucontext;
}

int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
        kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
        return 0;
}

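/*
 * Every eHCA mapping stores a pointer to a per-resource use count in
 * vm_private_data; open/close keep that count in step with the number
 * of VMAs referencing the resource, so the driver can detect mappings
 * that are still live.
 */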
static void ehca_mm_open(struct vm_area_struct *vma)
{
        u32 *count = (u32 *)vma->vm_private_data;
        if (!count) {
                ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
                             vma->vm_start, vma->vm_end);
                return;
        }
        (*count)++;
        if (!(*count))
                ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
                             vma->vm_start, vma->vm_end);
        ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
                     vma->vm_start, vma->vm_end, *count);
}

static void ehca_mm_close(struct vm_area_struct *vma)
{
        u32 *count = (u32 *)vma->vm_private_data;
        if (!count) {
                ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
                             vma->vm_start, vma->vm_end);
                return;
        }
        (*count)--;
        ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
                     vma->vm_start, vma->vm_end, *count);
}

static const struct vm_operations_struct vm_ops = {
        .open = ehca_mm_open,
        .close = ehca_mm_close,
};

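/*
 * Map the firmware galpa (register page) of a CQ or QP into user
 * space as a single non-cached 4k page.
 */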
static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
                        u32 *mm_count)
{
        int ret;
        u64 vsize, physical;

        vsize = vma->vm_end - vma->vm_start;
        if (vsize < EHCA_PAGESIZE) {
                ehca_gen_err("invalid vsize=%llx", vsize);
                return -EINVAL;
        }

        physical = galpas->user.fw_handle;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
        /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
        ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
                           vma->vm_page_prot);
        if (unlikely(ret)) {
                ehca_gen_err("remap_4k_pfn() failed ret=%i", ret);
                return -ENOMEM;
        }

        vma->vm_private_data = mm_count;
        (*mm_count)++;
        vma->vm_ops = &vm_ops;

        return 0;
}

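/*
 * Map a kernel-allocated ipz queue into user space by inserting its
 * backing pages one at a time.
 */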
static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
                           u32 *mm_count)
{
        int ret;
        u64 start, ofs;
        struct page *page;

        vma->vm_flags |= VM_RESERVED;
        start = vma->vm_start;
        for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
                u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
                page = virt_to_page(virt_addr);
                ret = vm_insert_page(vma, start, page);
                if (unlikely(ret)) {
                        ehca_gen_err("vm_insert_page() failed rc=%i", ret);
                        return ret;
                }
                start += PAGE_SIZE;
        }
        vma->vm_private_data = mm_count;
        (*mm_count)++;
        vma->vm_ops = &vm_ops;

        return 0;
}

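/* Dispatch a CQ mmap request by resource type. */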
static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
                        u32 rsrc_type)
{
        int ret;

        switch (rsrc_type) {
        case 0: /* galpa fw handle */
                ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
                ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
                                 "ehca_mmap_fw() failed rc=%i cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
                break;

        case 1: /* cq queue_addr */
                ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
                ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
                                 "ehca_mmap_queue() failed rc=%i cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
                break;

        default:
                ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
                         rsrc_type, cq->cq_number);
                return -EINVAL;
        }

        return 0;
}

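/* Dispatch a QP mmap request by resource type (fw galpa, rqueue, squeue). */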
static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
                        u32 rsrc_type)
{
        int ret;

        switch (rsrc_type) {
        case 0: /* galpa fw handle */
                ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
                ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_fw() failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        case 1: /* qp rqueue_addr */
                ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
                                      &qp->mm_count_rqueue);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        case 2: /* qp squeue_addr */
                ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
                                      &qp->mm_count_squeue);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        default:
                ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
                         rsrc_type, qp->ib_qp.qp_num);
                return -EINVAL;
        }

        return 0;
}

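/*
 * Layout of the mmap offset (vm_pgoff, i.e. the byte offset passed to
 * mmap() shifted right by the page shift):
 *
 *   bit  27     queue type: 0 = CQ, 1 = QP
 *   bits 26:25  resource type: 0 = galpa fw handle, 1 = (r)queue, 2 = squeue
 *   bits 24:0   idr handle of the CQ/QP
 *
 * A userspace library would therefore map, say, a QP's receive queue
 * roughly like this (a sketch for illustration, not taken from libehca;
 * qp_handle, rq_size and PAGE_SHIFT stand in for values the library
 * already knows):
 *
 *   off_t off = ((off_t)(1 << 27 | 1 << 25 | qp_handle)) << PAGE_SHIFT;
 *   void *rq = mmap(NULL, rq_size, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED, fd, off);
 */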
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        u64 fileoffset = vma->vm_pgoff;
        u32 idr_handle = fileoffset & 0x1FFFFFF;
        u32 q_type = (fileoffset >> 27) & 0x1;    /* CQ, QP,...        */
        u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
        int ret;
        struct ehca_cq *cq;
        struct ehca_qp *qp;
        struct ib_uobject *uobject;

        switch (q_type) {
        case 0: /* CQ */
                read_lock(&ehca_cq_idr_lock);
                cq = idr_find(&ehca_cq_idr, idr_handle);
                read_unlock(&ehca_cq_idr_lock);

                /* make sure this mmap really belongs to the authorized user */
                if (!cq)
                        return -EINVAL;

                if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
                        return -EINVAL;

                ret = ehca_mmap_cq(vma, cq, rsrc_type);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
                                 "ehca_mmap_cq() failed rc=%i cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
                break;

        case 1: /* QP */
                read_lock(&ehca_qp_idr_lock);
                qp = idr_find(&ehca_qp_idr, idr_handle);
                read_unlock(&ehca_qp_idr_lock);

                /* make sure this mmap really belongs to the authorized user */
                if (!qp)
                        return -EINVAL;

                uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
                if (!uobject || uobject->context != context)
                        return -EINVAL;

                ret = ehca_mmap_qp(vma, qp, rsrc_type);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_qp() failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        default:
                ehca_gen_err("bad queue type %x", q_type);
                return -EINVAL;
        }

        return 0;
}