linux/drivers/infiniband/core/umem.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/slab.h>

#include "uverbs.h"

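/*
 * __ib_umem_release - undo the pinning and DMA mapping done in
 * ib_umem_get(): unmap the scatterlist, mark pages dirty if the umem
 * was writable (and @dirty is set), drop the page references, and
 * free the scatter/gather table.
 */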
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

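	/* Unmap only if ib_dma_map_sg_attrs() actually mapped entries. */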
	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
				umem->nmap,
				DMA_BIDIRECTIONAL);

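	/*
	 * Release every pinned page; a page that may have been written
	 * through the MR must be marked dirty before it is unpinned.
	 */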
	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
		page = sg_page(sg);
		if (umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int i;
	DEFINE_DMA_ATTRS(attrs);
	struct scatterlist *sg, *sg_list_start;
	int need_release = 0;

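	/*
	 * With dmasync, DMA to this region forces all pending DMA writes
	 * to complete first (see DMA_ATTR_WRITE_BARRIER in
	 * Documentation/DMA-attributes.txt).
	 */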
	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kzalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->offset    = addr & ~PAGE_MASK;
	umem->page_size = PAGE_SIZE;
	umem->pid       = get_task_pid(current, PIDTYPE_PID);
	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

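	/*
	 * page_list is a scratch buffer: one page's worth of struct page
	 * pointers, which bounds how many pages each get_user_pages()
	 * call below can pin at once.
	 */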
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		put_pid(umem->pid);
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * if we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;

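	/*
	 * Charge the pages against RLIMIT_MEMLOCK.  mm->pinned_vm is
	 * read and written under mmap_sem, so the limit check here and
	 * the final store at "out:" pair up consistently.
	 */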
	down_write(&current->mm->mmap_sem);

	locked     = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (npages == 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto out;

	need_release = 1;
	sg_list_start = umem->sg_head.sgl;

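	/*
	 * Pin the user pages in chunks of at most one page_list's worth,
	 * filling in the scatterlist as we go.  get_user_pages() returns
	 * how many pages it pinned, which may be fewer than requested.
	 */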
	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     1, !umem->writable, page_list, vma_list);

		if (ret < 0)
			goto out;

		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		for_each_sg(sg_list_start, sg, ret, i) {
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;

			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}

		/* prepare for the next loop: sg now points one past the
		 * last entry filled above */
		sg_list_start = sg;
	}

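	/*
	 * Map the whole scatterlist for DMA; entries may get merged, so
	 * nmap can end up smaller than npages.
	 */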
	umem->nmap = ib_dma_map_sg_attrs(context->device,
					 umem->sg_head.sgl,
					 umem->npages,
					 DMA_BIDIRECTIONAL,
					 &attrs);

	if (umem->nmap <= 0) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;

out:
	if (ret < 0) {
		if (need_release)
			__ib_umem_release(context->device, umem, 0);
		put_pid(umem->pid);
		kfree(umem);
	} else
		current->mm->pinned_vm = locked;

	up_write(&current->mm->mmap_sem);
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
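
/*
 * Example (a sketch only; "my_mr" and the surrounding driver code are
 * hypothetical, not part of this file): a driver's reg_user_mr path
 * would typically pair ib_umem_get() with ib_umem_release():
 *
 *	struct my_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 *	...
 *	mr->umem = ib_umem_get(pd->uobject->context, start, length,
 *			       access_flags, 0);
 *	if (IS_ERR(mr->umem))
 *		return ERR_CAST(mr->umem);
 *	...
 *	ib_umem_release(mr->umem);	// in the dereg_mr path
 */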

/*
 * Deferred pinned_vm accounting, run from the ib_wq workqueue when
 * ib_umem_release() cannot take mmap_sem directly.
 */
static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->pinned_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	struct task_struct *task;
	unsigned long diff;

	__ib_umem_release(umem->context->device, umem, 1);

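	/*
	 * Look up the mm of the process that created the umem so its
	 * pinned_vm can be decremented; if that process has already
	 * exited, there is no accounting left to undo.
	 */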
	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the pinned_vm accounting to the ib_wq workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			queue_work(ib_wq, &umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->pinned_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

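/*
 * ib_umem_page_count - number of pages covered by the umem, computed
 * from the DMA-mapped lengths.  A single sg_dma_len() may span several
 * pages when entries were merged during mapping, so each length is
 * divided by the umem page size rather than counting sg entries.
 */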
int ib_umem_page_count(struct ib_umem *umem)
{
	int shift;
	int i;
	int n;
	struct scatterlist *sg;

	shift = ilog2(umem->page_size);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);