/* linux/drivers/infiniband/sw/rdmavt/mmap.c */
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <rdma/uverbs_ioctl.h>
#include "mmap.h"

  12/**
  13 * rvt_mmap_init - init link list and lock for mem map
  14 * @rdi: rvt dev struct
  15 */
  16void rvt_mmap_init(struct rvt_dev_info *rdi)
  17{
  18        INIT_LIST_HEAD(&rdi->pending_mmaps);
  19        spin_lock_init(&rdi->pending_lock);
  20        rdi->mmap_offset = PAGE_SIZE;
  21        spin_lock_init(&rdi->mmap_offset_lock);
  22}
  23
  24/**
  25 * rvt_release_mmap_info - free mmap info structure
  26 * @ref: a pointer to the kref within struct rvt_mmap_info
  27 */
  28void rvt_release_mmap_info(struct kref *ref)
  29{
  30        struct rvt_mmap_info *ip =
  31                container_of(ref, struct rvt_mmap_info, ref);
  32        struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);
  33
  34        spin_lock_irq(&rdi->pending_lock);
  35        list_del(&ip->pending_mmaps);
  36        spin_unlock_irq(&rdi->pending_lock);
  37
  38        vfree(ip->obj);
  39        kfree(ip);
  40}
  41
  42static void rvt_vma_open(struct vm_area_struct *vma)
  43{
  44        struct rvt_mmap_info *ip = vma->vm_private_data;
  45
  46        kref_get(&ip->ref);
  47}
  48
  49static void rvt_vma_close(struct vm_area_struct *vma)
  50{
  51        struct rvt_mmap_info *ip = vma->vm_private_data;
  52
  53        kref_put(&ip->ref, rvt_release_mmap_info);
  54}
  55
  56static const struct vm_operations_struct rvt_vm_ops = {
  57        .open = rvt_vma_open,
  58        .close = rvt_vma_close,
  59};
  60
/**
 * rvt_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Matches the mmap() offset against the device's list of objects that
 * were queued by an earlier CQ/QP/SRQ create call, maps the matching
 * vmalloc'ed buffer into the VMA, and installs the open/close ops that
 * refcount the entry.
 *
 * Return: zero if the mmap is OK. Otherwise, return an errno.
 */
int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rvt_dev_info *rdi = ib_to_rvt(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rvt_mmap_info *ip, *pp;
	int ret = -EINVAL;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_irq(&rdi->pending_lock);
	list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
				 pending_mmaps) {
		/* Only the creator is allowed to mmap the object */
		if (context != ip->context || (__u64)offset != ip->offset)
			continue;
		/* Don't allow a mmap larger than the object. */
		if (size > ip->size)
			break;	/* falls through to unlock, ret stays -EINVAL */

		/*
		 * Found it: take it off the pending list; from here on the
		 * entry's lifetime is governed by the VMA open/close kref.
		 */
		list_del_init(&ip->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);

		ret = remap_vmalloc_range(vma, ip->obj, 0);
		if (ret)
			goto done;
		vma->vm_ops = &rvt_vm_ops;
		vma->vm_private_data = ip;
		/* Take the reference for this first mapping of the object. */
		rvt_vma_open(vma);
		goto done;
	}
	spin_unlock_irq(&rdi->pending_lock);
done:
	return ret;
}
 106
/**
 * rvt_create_mmap_info - allocate information for hfi1_mmap
 * @rdi: rvt dev struct
 * @size: size in bytes to map
 * @udata: user data (must be valid!)
 * @obj: opaque pointer to a cq, wq etc
 *
 * Allocates an rvt_mmap_info carrying a fresh device-unique mmap
 * offset and records the user context that created @obj; rvt_mmap()
 * later uses both to match and authenticate the mmap() call.  The
 * caller is responsible for queuing the result on rdi->pending_mmaps.
 *
 * Return: rvt_mmap struct on success, ERR_PTR on failure
 */
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rvt_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	/* Allocate on the device's NUMA node. */
	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

	/*
	 * Carve a unique offset out of the device-wide offset space,
	 * SHMLBA-aligned (presumably to avoid cache aliasing of shared
	 * mappings on some architectures — TODO confirm).  If the counter
	 * has wrapped to 0, restart past offset 0, which is never used.
	 */
	spin_lock_irq(&rdi->mmap_offset_lock);
	if (rdi->mmap_offset == 0)
		rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += ALIGN(size, SHMLBA);
	spin_unlock_irq(&rdi->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->size = size;
	/*
	 * The udata is embedded in a uverbs_attr_bundle; recover the
	 * bundle to find the IB user context issuing this request.
	 */
	ip->context =
		container_of(udata, struct uverbs_attr_bundle, driver_udata)
			->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
 147
 148/**
 149 * rvt_update_mmap_info - update a mem map
 150 * @rdi: rvt dev struct
 151 * @ip: mmap info pointer
 152 * @size: size to grow by
 153 * @obj: opaque pointer to cq, wq, etc.
 154 */
 155void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
 156                          u32 size, void *obj)
 157{
 158        size = PAGE_ALIGN(size);
 159
 160        spin_lock_irq(&rdi->mmap_offset_lock);
 161        if (rdi->mmap_offset == 0)
 162                rdi->mmap_offset = PAGE_SIZE;
 163        ip->offset = rdi->mmap_offset;
 164        rdi->mmap_offset += size;
 165        spin_unlock_irq(&rdi->mmap_offset_lock);
 166
 167        ip->size = size;
 168        ip->obj = obj;
 169}