linux/drivers/infiniband/sw/rxe/rxe_mmap.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/pgtable.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

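/*
 * rxe_mmap_release - kref release callback for a struct rxe_mmap_info
 *
 * Runs when the last reference to the mmap info is dropped: take the
 * entry off the device's pending_mmaps list if it is still queued there,
 * then free the vmalloc'ed buffer and the info structure itself.
 */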
void rxe_mmap_release(struct kref *ref)
{
        struct rxe_mmap_info *ip = container_of(ref,
                                        struct rxe_mmap_info, ref);
        struct rxe_dev *rxe = to_rdev(ip->context->device);

        spin_lock_bh(&rxe->pending_lock);

        if (!list_empty(&ip->pending_mmaps))
                list_del(&ip->pending_mmaps);

        spin_unlock_bh(&rxe->pending_lock);

        vfree(ip->obj);         /* buf */
        kfree(ip);
}

/*
 * open and close keep track of how many times the memory region is
 * mapped, so that it is not released while a mapping still exists.
 */
static void rxe_vma_open(struct vm_area_struct *vma)
{
        struct rxe_mmap_info *ip = vma->vm_private_data;

        kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
        struct rxe_mmap_info *ip = vma->vm_private_data;

        kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
        .open = rxe_vma_open,
        .close = rxe_vma_close,
};

/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 * Return zero if the mmap is OK. Otherwise, return an errno.
 */
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct rxe_dev *rxe = to_rdev(context->device);
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;
        struct rxe_mmap_info *ip, *pp;
        int ret;

        /*
         * Search the device's list of objects waiting for a mmap call.
         * Normally, this list is very short since a call to create a
         * CQ, QP, or SRQ is soon followed by a call to mmap().
         */
        spin_lock_bh(&rxe->pending_lock);
        list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
                if (context != ip->context || (__u64)offset != ip->info.offset)
                        continue;

                /* Don't allow a mmap larger than the object. */
                if (size > ip->info.size) {
                        pr_err("mmap region is larger than the object!\n");
                        spin_unlock_bh(&rxe->pending_lock);
                        ret = -EINVAL;
                        goto done;
                }

                goto found_it;
        }
        pr_warn("unable to find pending mmap info\n");
        spin_unlock_bh(&rxe->pending_lock);
        ret = -EINVAL;
        goto done;

found_it:
        list_del_init(&ip->pending_mmaps);
        spin_unlock_bh(&rxe->pending_lock);

        ret = remap_vmalloc_range(vma, ip->obj, 0);
        if (ret) {
                pr_err("err %d from remap_vmalloc_range\n", ret);
                goto done;
        }

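        /*
         * Hook up the VMA open/close handlers and take a reference on the
         * mmap info for this mapping; the reference is dropped in
         * rxe_vma_close() when the mapping goes away.
         */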
        vma->vm_ops = &rxe_vm_ops;
        vma->vm_private_data = ip;
        rxe_vma_open(vma);
done:
        return ret;
}

/*
 * Allocate the rxe_mmap_info used by rxe_mmap(): record the object and
 * the user context, round the size up to a whole number of pages and
 * reserve a unique, SHMLBA-aligned pseudo file offset for it.
 */
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
                                           struct ib_udata *udata, void *obj)
{
        struct rxe_mmap_info *ip;

        if (!udata)
                return ERR_PTR(-EINVAL);

        ip = kmalloc(sizeof(*ip), GFP_KERNEL);
        if (!ip)
                return ERR_PTR(-ENOMEM);

        size = PAGE_ALIGN(size);

        spin_lock_bh(&rxe->mmap_offset_lock);

        if (rxe->mmap_offset == 0)
                rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

        ip->info.offset = rxe->mmap_offset;
        rxe->mmap_offset += ALIGN(size, SHMLBA);

        spin_unlock_bh(&rxe->mmap_offset_lock);

        INIT_LIST_HEAD(&ip->pending_mmaps);
        ip->info.size = size;
        ip->context =
                container_of(udata, struct uverbs_attr_bundle, driver_udata)
                        ->context;
        ip->obj = obj;
        kref_init(&ip->ref);

        return ip;
}
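
/*
 * Hedged sketch (not part of the upstream file): how a queue-creating verb
 * is expected to pair with rxe_mmap() above.  The function name, arguments
 * and the way the offset reaches user space are assumptions for
 * illustration only; only rxe_create_mmap_info(), pending_lock and
 * pending_mmaps are taken from this file.
 */
#if 0
static int example_export_queue(struct rxe_dev *rxe, struct ib_udata *udata,
                                void *buf, size_t buf_size,
                                struct rxe_mmap_info **ip_out)
{
        struct rxe_mmap_info *ip;

        /* Reserve a pseudo file offset and remember the vmalloc'ed buffer. */
        ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
        if (IS_ERR(ip))
                return PTR_ERR(ip);

        /*
         * ip->info.offset would be copied into the create response so that
         * the user library can pass it back as the mmap() offset, where
         * rxe_mmap() matches it against this entry by context and offset.
         */

        /* Queue the entry so rxe_mmap() can find it. */
        spin_lock_bh(&rxe->pending_lock);
        list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
        spin_unlock_bh(&rxe->pending_lock);

        *ip_out = ip;
        return 0;
}
#endif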