linux/include/rdma/ib_umem_odp.h
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>

struct umem_odp_node {
	u64 __subtree_last;
	struct rb_node rb;
};

struct ib_umem_odp {
	struct ib_umem umem;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page		**page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t		*dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex		umem_mutex;
	void			*private; /* for the HW driver to use. */

	int notifiers_seq;
	int notifiers_count;
	int npages;

	/* Tree tracking */
	struct umem_odp_node	interval_tree;

	struct completion	notifier_completion;
	int			dying;
	struct work_struct	work;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to ib_umem_odp_map_dma_pages().
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))

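/*
 * Example (illustrative sketch, not part of the kernel sources): a driver
 * consuming a dma_list entry would mask off the permission bits to recover
 * the device-visible address, and test the bits to learn the allowed
 * access. 'idx' is a hypothetical page index within the umem.
 *
 *	dma_addr_t entry = umem_odp->dma_list[idx];
 *
 *	if (entry) {
 *		// page is currently mapped into the device
 *		dma_addr_t addr = entry & ODP_DMA_ADDR_MASK;
 *		bool writable = entry & ODP_WRITE_ALLOWED_BIT;
 *
 *		// program 'addr' into the HW page table, granting write
 *		// access only if 'writable' is set
 *	}
 */
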
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_ucontext_per_mm {
	struct ib_ucontext *context;
	struct mm_struct *mm;
	struct pid *tgid;
	bool active;

	struct rb_root_cached umem_tree;
	/* Protects umem_tree */
	struct rw_semaphore umem_rwsem;

	struct mmu_notifier mn;
	unsigned int odp_mrs_count;

	struct list_head ucontext_list;
	struct rcu_head rcu;
};

int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root_umem,
				      unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
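
/*
 * Example (illustrative sketch, not part of the kernel sources): an
 * implicit-ODP style driver can carve a child umem out of an existing
 * root umem, and must balance the allocation with ib_umem_odp_release().
 * 'root_odp', 'addr' and 'len' are hypothetical values for this sketch.
 *
 *	struct ib_umem_odp *child;
 *
 *	child = ib_alloc_odp_umem(root_odp, addr, len);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	// ... fault pages into 'child' as needed ...
 *	ib_umem_odp_release(child);
 */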

int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
			      void *cookie);
/*
 * Call the callback on each ib_umem_odp in the range. Returns the logical OR
 * of the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 end,
				  umem_call_back cb,
				  bool blockable, void *cookie);
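
/*
 * Example (illustrative sketch, not part of the kernel sources): an mmu
 * notifier invalidation path typically walks the interval tree under
 * umem_rwsem and unmaps the overlapping pages of each umem it finds.
 * 'invalidate_one' and 'per_mm' are hypothetical names for this sketch.
 *
 *	static int invalidate_one(struct ib_umem_odp *umem_odp, u64 start,
 *				  u64 end, void *cookie)
 *	{
 *		// drop the device mappings covering [start, end)
 *		ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
 *		return 0;
 *	}
 *
 *	down_read(&per_mm->umem_rwsem);
 *	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
 *				      invalidate_one, true, NULL);
 *	up_read(&per_mm->umem_rwsem);
 */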

/*
 * Find the first region intersecting the address range.
 * Returns NULL if not found.
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length);

static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * mmu_notifier_retry. Should be called with
	 * the relevant locks taken (umem_odp->umem_mutex
	 * and the per_mm umem_rwsem held for read).
	 */

	if (unlikely(umem_odp->notifiers_count))
		return 1;
	if (umem_odp->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
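
/*
 * Example (illustrative sketch, not part of the kernel sources): the usual
 * page-fault pattern samples notifiers_seq before mapping, maps the pages,
 * and then re-checks for a concurrent invalidation under umem_mutex before
 * programming the hardware. 'program_hw_mr', 'io_virt', 'bcnt' and
 * 'access_mask' are hypothetical names for this sketch.
 *
 *	unsigned long current_seq = READ_ONCE(umem_odp->notifiers_seq);
 *
 *	smp_rmb();	// sample the sequence before touching the pages
 *
 *	ret = ib_umem_odp_map_dma_pages(umem_odp, io_virt, bcnt,
 *					access_mask, current_seq);
 *	if (ret < 0)
 *		return ret;
 *
 *	mutex_lock(&umem_odp->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem_odp, current_seq))
 *		program_hw_mr(umem_odp);  // safe: no invalidation raced us
 *	mutex_unlock(&umem_odp->umem_mutex);
 *	// on a retry indication, the caller loops and faults again
 */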

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
	return -EINVAL;
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */