1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#ifndef IB_UMEM_ODP_H
34#define IB_UMEM_ODP_H
35
36#include <rdma/ib_umem.h>
37#include <rdma/ib_verbs.h>
38#include <linux/interval_tree.h>
39
/*
 * Node linking an ODP umem into the per-mm interval tree
 * (the umem_tree rb_root_cached in struct ib_ucontext_per_mm).
 */
struct umem_odp_node {
	u64 __subtree_last;	/* augmented-subtree max, maintained by <linux/interval_tree.h> */
	struct rb_node rb;
};
44
/*
 * Per-registration state for an on-demand-paging (ODP) memory region:
 * the page/DMA bookkeeping that lets page faults and MMU-notifier
 * invalidations be serviced for the embedded umem.
 */
struct ib_umem_odp {
	/* Embedded umem; see to_ib_umem_odp() for the reverse mapping. */
	struct ib_umem umem;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * Array of the pages backing the ODP umem.  Entries for pages that
	 * are currently not mapped into the device are presumably NULL —
	 * TODO confirm against ib_umem_odp_map_dma_pages() in the .c file.
	 */
	struct page **page_list;

	/*
	 * Array parallel to page_list holding the DMA address mapped for
	 * each page.  The low two bits of each entry carry the access
	 * permissions — see ODP_READ_ALLOWED_BIT / ODP_WRITE_ALLOWED_BIT
	 * and ODP_DMA_ADDR_MASK below.
	 */
	dma_addr_t *dma_list;

	/*
	 * NOTE(review): umem_mutex appears to serialize updates to
	 * page_list/dma_list and the notifier counters below (it is the
	 * lock ib_umem_mmu_notifier_retry() expects held) — verify against
	 * the implementation.
	 */
	struct mutex umem_mutex;
	void *private;		/* opaque cookie for the HW driver's use */

	/* Invalidation bookkeeping consumed by ib_umem_mmu_notifier_retry(). */
	int notifiers_seq;
	int notifiers_count;
	int npages;		/* count of currently mapped pages */

	/* Linkage into per_mm->umem_tree (interval tree keyed by VA range). */
	struct umem_odp_node interval_tree;

	/* Signalled when pending notifiers complete; used during teardown. */
	struct completion notifier_completion;
	int dying;
	struct work_struct work;
};
81
/* Recover the containing ib_umem_odp from its embedded ib_umem member. */
static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}
86
87
88
89
90
91
92
93
94
/*
 * The low two bits of a dma_list entry encode the page's access
 * permissions; ODP_DMA_ADDR_MASK strips them to recover the DMA address.
 *
 * Note: the suffix belongs on the value, not the shift amount.  The old
 * form "(1<<0ULL)" made the constants plain signed ints (the ULL only
 * widened the shift count), so ODP_DMA_ADDR_MASK was a signed ~3 that
 * relied on sign-extension when combined with a 64-bit dma_addr_t.
 * "(1ULL << n)" yields properly 64-bit unsigned constants with the same
 * resulting mask value.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
99
100#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
101
/*
 * Per-mm ODP state: shared by all ODP umems that a given ib_ucontext has
 * registered against the same mm_struct.
 */
struct ib_ucontext_per_mm {
	struct ib_ucontext *context;
	struct mm_struct *mm;
	struct pid *tgid;
	bool active;

	/* Interval tree of the ODP umems covering this mm, keyed by VA range. */
	struct rb_root_cached umem_tree;
	/* Protects umem_tree. */
	struct rw_semaphore umem_rwsem;

	/* MMU notifier hooked into the mm for invalidation callbacks. */
	struct mmu_notifier mn;
	unsigned int odp_mrs_count;

	/* Linkage on the owning ucontext's list of per-mm structures. */
	struct list_head ucontext_list;
	struct rcu_head rcu;	/* deferred free of this structure */
};
118
/* Initialize ODP state for @umem_odp with the given IB access flags. */
int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
/*
 * Allocate a child ODP umem covering [addr, addr + size) under an existing
 * implicit/root ODP umem.  Returns the new umem or an ERR_PTR —
 * TODO confirm error convention against the implementation.
 */
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root_umem,
				      unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

/*
 * Pin and DMA-map user pages in [start_offset, start_offset + bcnt) with
 * @access_mask permission bits (ODP_READ/WRITE_ALLOWED_BIT).  @current_seq
 * is the notifier sequence sampled before faulting, used with
 * ib_umem_mmu_notifier_retry() to detect a racing invalidation.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

/* Unmap and unpin the pages in [start_offset, bound). */
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

/* Callback invoked per matching umem by rbt_ib_umem_for_each_in_range(). */
typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
			      void *cookie);

/*
 * Invoke @cb(cookie) on every ODP umem in @root intersecting [start, end].
 * @blockable indicates whether the walk may sleep — NOTE(review): confirm
 * how a non-blockable walk reports contention.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 end,
				  umem_call_back cb,
				  bool blockable, void *cookie);

/*
 * Find an ODP umem in @root overlapping [addr, addr + length), or NULL if
 * none intersects.
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length);
148
/*
 * Decide whether a page-fault mapping must be retried because an MMU
 * notifier invalidation raced with it.  Returns 1 (retry) when an
 * invalidation is in flight (notifiers_count != 0) or when the sequence
 * number has advanced past the @mmu_seq the caller sampled before
 * faulting; returns 0 when the mapping is still valid.
 *
 * NOTE(review): the count-then-sequence check mirrors KVM's
 * kvm_mmu_notifier_retry and presumably relies on umem_odp->umem_mutex
 * being held so the two reads are consistent — confirm the locking
 * contract against the callers.
 */
static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
					     unsigned long mmu_seq)
{
	/* An invalidation callback is currently running: must retry. */
	if (unlikely(umem_odp->notifiers_count))
		return 1;
	/* An invalidation completed after @mmu_seq was sampled: must retry. */
	if (umem_odp->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
165
166#else
167
/* ODP compiled out (!CONFIG_INFINIBAND_ON_DEMAND_PAGING): always fail. */
static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
	return -EINVAL;
}
172
/* No-op when ODP support is compiled out. */
static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}
174
175#endif
176
177#endif
178