/*
 * Memory region key (lkey/rkey) management for the QLogic qib
 * InfiniBand driver.
 */
#include "qib.h"

/**
 * qib_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments the mr reference count as required.
 *
 * Sets the lkey field of @mr for non-DMA regions.
 */
int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr: lkey == 0 */
	if (dma_region) {
		struct rvt_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			qib_get_mr(mr);
			rcu_assign_pointer(dev->dma_mr, mr);
			mr->lkey_published = 1;
		}
		goto success;
	}

	/* Find the next available LKEY slot */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
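	/*
	 * Note (added commentary): the table is an open-addressed array
	 * scanned with linear probing; the "& (rkt->max - 1)" wrap
	 * requires rkt->max to be a power of two, e.g. with
	 * rkt->max == 8, (7 + 1) & 7 == 0 wraps the scan back to slot 0.
	 */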
	/*
	 * Make sure lkey is never zero, which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * The table-size bits are capped elsewhere to leave enough bits
	 * for the generation number.
	 */
	mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
		((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		/* lkey == 0 is reserved for the "special" DMA key */
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
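	/*
	 * Worked example (added commentary, assuming
	 * ib_rvt_lkey_table_size == 16): slot r == 0x0005 with
	 * generation 0x2a packs as
	 *
	 *	(0x0005 << 16) | ((0xff & 0x2a) << 8) == 0x00052a00
	 *
	 * i.e. bits [31:16] hold the table index and bits [15:8] the
	 * generation, so a stale key referencing a reused slot fails the
	 * "mr->lkey != rkey" check in qib_rkey_ok().
	 */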
	qib_get_mr(mr);
	rcu_assign_pointer(rkt->table[r], mr);
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}
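
/*
 * Note (added commentary): allocation publishes the MR with
 * rcu_assign_pointer() while holding rkt->lock, so lookups in
 * qib_rkey_ok() can run locklessly under rcu_read_lock() and still
 * observe a fully initialized rvt_mregion.
 */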

/**
 * qib_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
void qib_free_lkey(struct rvt_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
	if (lkey == 0)
		RCU_INIT_POINTER(dev->dma_mr, NULL);
	else {
		r = lkey >> (32 - ib_rvt_lkey_table_size);
		RCU_INIT_POINTER(rkt->table[r], NULL);
	}
	qib_put_mr(mr);
	mr->lkey_published = 0;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
}
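
/*
 * Note (added commentary): freeing only unpublishes the MR and drops
 * the table's reference via qib_put_mr(); concurrent readers that won
 * the atomic_inc_not_zero() race in qib_rkey_ok() still hold their own
 * references, so the region stays usable until they release them.
 */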

/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return 1 if successful, otherwise 0.
 *
 * Increments the reference count upon success.
 */
int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct rvt_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * needed: entries_spanned_by_off is the number of times
		 * the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
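	/*
	 * Worked example for the branches above (added commentary,
	 * assuming page_shift == 12, i.e. 4 KiB pages, and
	 * RVT_SEGSZ == 256 for the arithmetic): off == 0x1234500 spans
	 * 0x1234 (4660) whole pages, so the fast path yields
	 * m = 4660 / 256 = 18 and n = 4660 % 256 = 52, leaving
	 * off = 0x500 as the byte offset within segs[52] of map 18.
	 */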
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
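
/*
 * Usage sketch (added commentary; the caller shape is hypothetical,
 * not taken from this file): a responder handling an incoming RDMA
 * WRITE might validate the advertised rkey before touching memory:
 *
 *	struct rvt_sge sge;
 *
 *	if (!qib_rkey_ok(qp, &sge, length, vaddr, rkey,
 *			 IB_ACCESS_REMOTE_WRITE))
 *		return -EACCES;	// bad key, range, or permissions
 *	// ... copy the payload through sge ...
 *	qib_put_mr(sge.mr);	// drop the reference taken on success
 */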