linux/drivers/infiniband/core/umem.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: uverbs_mem.c 2743 2005-06-28 22:27:59Z roland $
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

#include "uverbs.h"

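/*
 * Maximum number of scatterlist entries per chunk: one page holds the
 * ib_umem_chunk header plus as many page_list entries as fit in the
 * remaining space.
 */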
#define IB_UMEM_MAX_PAGE_CHUNK                                          \
        ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /      \
         ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -        \
          (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))

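/*
 * Undo the work of ib_umem_get() for one umem: DMA-unmap each chunk's
 * scatterlist, unpin the pages (marking them dirty first if the region
 * was writable and @dirty is set), and free the chunks.
 */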
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
        struct ib_umem_chunk *chunk, *tmp;
        int i;

        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
                ib_dma_unmap_sg(dev, chunk->page_list,
                                chunk->nents, DMA_BIDIRECTIONAL);
                for (i = 0; i < chunk->nents; ++i) {
                        struct page *page = sg_page(&chunk->page_list[i]);

                        if (umem->writable && dirty)
                                set_page_dirty_lock(page);
                        put_page(page);
                }

                kfree(chunk);
        }
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 *
 * Returns the new ib_umem on success, or an ERR_PTR() on failure.
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                            size_t size, int access)
{
        struct ib_umem *umem;
        struct page **page_list;
        struct vm_area_struct **vma_list;
        struct ib_umem_chunk *chunk;
        unsigned long locked;
        unsigned long lock_limit;
        unsigned long cur_base;
        unsigned long npages;
        int ret;
        int off;
        int i;

        if (!can_do_mlock())
                return ERR_PTR(-EPERM);

        umem = kmalloc(sizeof *umem, GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        umem->context   = context;
        umem->length    = size;
        umem->offset    = addr & ~PAGE_MASK;
        umem->page_size = PAGE_SIZE;
        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
         * obviously require write access.  "Remote atomic" can do
         * things like fetch and add, which will modify memory, and
         * "MW bind" can change permissions by binding a window.
         */
        umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);

        /* We assume the memory is from hugetlb until proved otherwise */
        umem->hugetlb   = 1;

        INIT_LIST_HEAD(&umem->chunk_list);

        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }

        /*
         * if we can't alloc the vma_list, it's not so bad;
         * just assume the memory is not hugetlb memory
         */
        vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
        if (!vma_list)
                umem->hugetlb = 0;

        npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;

        down_write(&current->mm->mmap_sem);

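        /*
         * Pinned pages count against the process's RLIMIT_MEMLOCK unless
         * it has CAP_IPC_LOCK; the new total is only committed to
         * mm->locked_vm once every page has been pinned successfully.
         */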
        locked     = npages + current->mm->locked_vm;
        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;

        if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
                ret = -ENOMEM;
                goto out;
        }

        cur_base = addr & PAGE_MASK;

        ret = 0;
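        /*
         * Pin the region in batches: page_list is a single page, so each
         * get_user_pages() call pins at most PAGE_SIZE / sizeof(struct page *)
         * pages, which are then packed into scatterlist chunks below.
         */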
        while (npages) {
                ret = get_user_pages(current, current->mm, cur_base,
                                     min_t(int, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
                                     1, !umem->writable, page_list, vma_list);

                if (ret < 0)
                        goto out;

                cur_base += ret * PAGE_SIZE;
                npages   -= ret;

                off = 0;

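                /*
                 * Split this batch of pinned pages into ib_umem_chunk
                 * scatterlists of at most IB_UMEM_MAX_PAGE_CHUNK entries,
                 * DMA map each chunk, and note whether every page came
                 * from a hugetlb VMA.
                 */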
                while (ret) {
                        chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
                                        min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
                                        GFP_KERNEL);
                        if (!chunk) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
                        sg_init_table(chunk->page_list, chunk->nents);
                        for (i = 0; i < chunk->nents; ++i) {
                                if (vma_list &&
                                    !is_vm_hugetlb_page(vma_list[i + off]))
                                        umem->hugetlb = 0;
                                sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
                        }

                        chunk->nmap = ib_dma_map_sg(context->device,
                                                    &chunk->page_list[0],
                                                    chunk->nents,
                                                    DMA_BIDIRECTIONAL);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
                                        put_page(sg_page(&chunk->page_list[i]));
                                kfree(chunk);

                                ret = -ENOMEM;
                                goto out;
                        }

                        ret -= chunk->nents;
                        off += chunk->nents;
                        list_add_tail(&chunk->list, &umem->chunk_list);
                }

                ret = 0;
        }

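        /*
         * On error, release whatever was already chunked and pinned and
         * drop the umem; the locked_vm accounting is only committed on
         * success.
         */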
out:
        if (ret < 0) {
                __ib_umem_release(context->device, umem, 0);
                kfree(umem);
        } else
                current->mm->locked_vm = locked;

        up_write(&current->mm->mmap_sem);
        if (vma_list)
                free_page((unsigned long) vma_list);
        free_page((unsigned long) page_list);

        return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
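
/*
 * A rough sketch of how a driver's reg_user_mr verb typically uses this
 * API (illustrative only; "my_mr" and "my_reg_user_mr" are made-up names,
 * and real drivers such as mthca or mlx4 add their own MR bookkeeping):
 *
 *	struct ib_mr *my_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 *				     u64 virt_addr, int access, struct ib_udata *udata)
 *	{
 *		struct my_mr *mr = kmalloc(sizeof *mr, GFP_KERNEL);
 *
 *		if (!mr)
 *			return ERR_PTR(-ENOMEM);
 *
 *		mr->umem = ib_umem_get(pd->uobject->context, start, length, access);
 *		if (IS_ERR(mr->umem)) {
 *			int err = PTR_ERR(mr->umem);
 *
 *			kfree(mr);
 *			return ERR_PTR(err);
 *		}
 *
 *		... program the HCA with the chunks in mr->umem->chunk_list ...
 *
 *		return &mr->ibmr;
 *	}
 *
 * The matching dereg_mr verb unpins the pages with ib_umem_release(mr->umem)
 * once the HCA no longer references them.
 */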
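/*
 * Deferred locked_vm accounting: scheduled by ib_umem_release() when it
 * cannot take mmap_sem directly (see the comment there), this runs from
 * the system workqueue to subtract the freed pages and drop the mm.
 */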
static void ib_umem_account(struct work_struct *work)
{
        struct ib_umem *umem = container_of(work, struct ib_umem, work);

        down_write(&umem->mm->mmap_sem);
        umem->mm->locked_vm -= umem->diff;
        up_write(&umem->mm->mmap_sem);
        mmput(umem->mm);
        kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
        struct ib_ucontext *context = umem->context;
        struct mm_struct *mm;
        unsigned long diff;

        __ib_umem_release(umem->context->device, umem, 1);

        mm = get_task_mm(current);
        if (!mm) {
                kfree(umem);
                return;
        }

        diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;

        /*
         * We may be called with the mm's mmap_sem already held.  This
         * can happen when a userspace munmap() is the call that drops
         * the last reference to our file and calls our release
         * method.  If there are memory regions to destroy, we'll end
         * up here and not be able to take the mmap_sem.  In that case
         * we defer the locked_vm accounting to the system workqueue.
         */
        if (context->closing) {
                if (!down_write_trylock(&mm->mmap_sem)) {
                        INIT_WORK(&umem->work, ib_umem_account);
                        umem->mm   = mm;
                        umem->diff = diff;

                        schedule_work(&umem->work);
                        return;
                }
        } else
                down_write(&mm->mmap_sem);

        current->mm->locked_vm -= diff;
        up_write(&mm->mmap_sem);
        mmput(mm);
        kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

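/*
 * Return the number of pages (in units of umem->page_size) spanned by
 * the DMA-mapped scatterlist entries of this umem.
 */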
int ib_umem_page_count(struct ib_umem *umem)
{
        struct ib_umem_chunk *chunk;
        int shift;
        int i;
        int n;

        shift = ilog2(umem->page_size);

        n = 0;
        list_for_each_entry(chunk, &umem->chunk_list, list)
                for (i = 0; i < chunk->nmap; ++i)
                        n += sg_dma_len(&chunk->page_list[i]) >> shift;

        return n;
}
EXPORT_SYMBOL(ib_umem_page_count);