/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
	int i;

	spin_lock_init(&ctx->sste_lock);
	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = NULL;

	/*
	 * Allocate the segment table before we put it in the IDR so that we
	 * can always access it when dereferenced from the IDR. For the same
	 * reason, the segment table is only destroyed after the context is
	 * removed from the IDR. Access to this in the IOCTL is protected by
	 * Linux filesystem semantics (can't IOCTL until open is complete).
	 */
	i = cxl_alloc_sst(ctx);
	if (i)
		return i;

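	/* Defer fault handling to process context via a work item */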
	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

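	/* Event state reported to userspace through the read/poll fops */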
	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside a
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating IDR! We better make sure everything's setup that
	 * dereferences from it.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

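	/*
	 * The IDR index doubles as the process element handle. On bare metal
	 * it also indexes this context's entry in the SPA; in a guest the
	 * real handle is only known once the context attaches.
	 */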
	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;

	/*
	 * Take a ref on the afu so that it stays alive at least till
	 * this context is reclaimed inside reclaim_ctx.
	 */
	cxl_afu_get(afu);
	return 0;
}

void cxl_context_set_mapping(struct cxl_context *ctx,
			     struct address_space *mapping)
{
	mutex_lock(&ctx->mapping_lock);
	ctx->mapping = mapping;
	mutex_unlock(&ctx->mapping_lock);
}

static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct cxl_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	u64 area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

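	/* Problem state mappings are only valid while the context is started */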
	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
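		/*
		 * If the context asked for it, hand back a page of 0xff
		 * instead of SIGBUS, so reads of a dead context behave like
		 * reads from an unresponsive device.
		 */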
		if (ctx->mmio_err_ff) {
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

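	/* Fault in the page of the problem state area being touched */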
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;
	}

	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
		/* make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

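	/* MMIO space: no struct pages, uncached, faulted in a page at a time */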
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/* Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);

	/* Release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);
	put_pid(ctx->glpid);

	cxl_ctx_put();

	/* Decrease the attached context count on the adapter */
	cxl_adapter_context_put(ctx->afu->adapter);
	return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (ie. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

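	/* Hardware is done with this context: release its IRQs and wake waiters */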
	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be setup before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

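	/* Free the segment table allocated in cxl_alloc_sst() */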
	free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;

	kfree(ctx->irq_bitmap);

	/* Drop the ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}

void cxl_context_free(struct cxl_context *ctx)
{
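	/* Kernel API contexts manage their own mapping; release it here */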
	if (ctx->kernelapi && ctx->mapping)
		cxl_release_mapping(ctx);
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
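	/*
	 * Defer the actual free with RCU: interrupt handlers may still be
	 * looking this context up in the IDR under rcu_read_lock().
	 */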
	call_rcu(&ctx->rcu, reclaim_ctx);
}