1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53#ifndef SCIF_RMA_H
54#define SCIF_RMA_H
55
56#include <linux/intel-iommu.h>
57#include <linux/mmu_notifier.h>
58
59#include "../bus/scif_bus.h"
60
61
/* Bit position used to flag a fence request as targeting the remote node */
#define SCIF_REMOTE_FENCE_BIT 31
/* 64-bit mask form of the remote-fence flag */
#define SCIF_REMOTE_FENCE BIT_ULL(SCIF_REMOTE_FENCE_BIT)

/* Largest buffer (1 MB) handled through the unaligned copy path */
#define SCIF_MAX_UNALIGNED_BUF_SIZE (1024 * 1024ULL)
/*
 * Size of a kernel bounce buffer for unaligned copies: the 1 MB
 * payload plus two extra L1 cache lines of head/tail slack so the
 * copy can be re-aligned inside the buffer.
 */
#define SCIF_KMEM_UNALIGNED_BUF_SIZE (SCIF_MAX_UNALIGNED_BUF_SIZE + \
				      (L1_CACHE_BYTES << 1))

/* First PFN handed out by the IOVA allocator (allocation starts at 1) */
#define SCIF_IOVA_START_PFN (1)
/* Convert a byte address/offset to its page frame number */
#define SCIF_IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
/* Highest PFN expressible under a 64-bit / 63-bit DMA mask */
#define SCIF_DMA_64BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(64))
#define SCIF_DMA_63BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(63))
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
/**
 * struct scif_endpt_rma_info - per-endpoint remote memory access state
 * @reg_list: registration windows created by this endpoint (self)
 * @remote_reg_list: registration windows mirrored from the peer
 * @iovad: IOVA domain used to hand out registration offsets
 * @rma_lock: mutex serializing access to the registration lists
 * @tc_lock: spinlock protecting temporary cached window state
 * @mmn_lock: mutex serializing access to @mmn_list
 * @tw_refcount: count of outstanding temporary windows
 * @tcw_refcount: count of outstanding temporary cached windows
 * @tcw_total_pages: pages pinned by temporary cached windows
 * @mmn_list: MMU notifier registrations for this endpoint
 * @fence_refcount: count of outstanding fence requests
 * @dma_chan: DMA channel used for this endpoint's transfers
 * @async_list_del: flags an asynchronous list-entry deletion
 * @vma_list: VMAs holding remote memory mappings
 * @markwq: wait queue used by the fence mark/wait machinery
 *
 * NOTE(review): field semantics are inferred from names and types;
 * confirm against the .c files that manipulate this structure.
 */
struct scif_endpt_rma_info {
	struct list_head reg_list;
	struct list_head remote_reg_list;
	struct iova_domain iovad;
	struct mutex rma_lock;
	spinlock_t tc_lock;
	struct mutex mmn_lock;
	atomic_t tw_refcount;
	atomic_t tcw_refcount;
	atomic_t tcw_total_pages;
	struct list_head mmn_list;
	atomic_t fence_refcount;
	struct dma_chan	*dma_chan;
	int async_list_del;
	struct list_head vma_list;
	wait_queue_head_t markwq;
};
116
117
118
119
120
121
122
123
/**
 * struct scif_fence_info - tracking for a local fence request
 * @state: current message state of the fence request
 * @comp: completion signalled when the fence request finishes
 * @dma_mark: DMA mark associated with the fence (see _scif_fence_mark())
 */
struct scif_fence_info {
	enum scif_msg_state state;
	struct completion comp;
	int dma_mark;
};
129
130
131
132
133
134
135
/**
 * struct scif_remote_fence_info - fence request received from a peer
 * @msg: the received fence request message
 * @list: link on the list of pending remote fence requests
 */
struct scif_remote_fence_info {
	struct scifmsg msg;
	struct list_head list;
};
140
141
142
143
144
145
146
/*
 * Types of registration windows.
 * NOTE(review): meanings inferred from the identifiers —
 * PARTIAL/SINGLE/FULL appear to describe how much of the window a
 * mapping covers, while SELF/PEER distinguish which side owns the
 * window; verify against the call sites passing these values.
 */
enum scif_window_type {
	SCIF_WINDOW_PARTIAL,
	SCIF_WINDOW_SINGLE,
	SCIF_WINDOW_FULL,
	SCIF_WINDOW_SELF,
	SCIF_WINDOW_PEER
};
154
155
156#define SCIF_NR_ADDR_IN_PAGE (0x1000 >> 3)
157
158
159
160
161
162
163
164
165
166
167
/**
 * struct scif_rma_lookup - lookup table for per-page DMA addresses
 * @lookup: array holding DMA addresses of the lookup pages
 * @offset: DMA address of the @lookup array itself
 *
 * NOTE(review): the exact two-level indexing scheme lives in the .c
 * side (see SCIF_NR_ADDR_IN_PAGE); confirm before relying on it.
 */
struct scif_rma_lookup {
	dma_addr_t *lookup;
	dma_addr_t offset;
};
172
173
174
175
176
177
178
179
180
181
182
183
184
185
/**
 * struct scif_pinned_pages - a set of pages pinned for registration
 * @nr_pages: number of pages pinned
 * @prot: protection flags requested for the pinning
 * @map_flags: flags supplied at map time
 * @ref_count: references held on this pinned set
 * @magic: sanity-check cookie
 * @pages: array of the pinned struct page pointers
 */
struct scif_pinned_pages {
	s64 nr_pages;
	int prot;
	int map_flags;
	atomic_t ref_count;
	u64 magic;
	struct page **pages;
};
194
195
196
197
198
199
200
201
/**
 * struct scif_status - descriptor for a DMA status/signal update
 * @src_dma_addr: DMA address of the source of the value
 * @val: value to be written at the destination
 * @ep: endpoint the status update belongs to
 */
struct scif_status {
	dma_addr_t src_dma_addr;
	u64 val;
	struct scif_endpt *ep;
};
207
208
209
210
211
212
213
214
/**
 * struct scif_cb_arg - argument passed to a DMA completion callback
 * @src_dma_addr: source DMA address associated with the transfer
 * @status: status descriptor to act on when the transfer completes
 * @ep: owning endpoint
 */
struct scif_cb_arg {
	dma_addr_t src_dma_addr;
	struct scif_status *status;
	struct scif_endpt *ep;
};
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
/**
 * struct scif_window - a SCIF registration window
 * @nr_pages: number of pages spanned by the window
 * @nr_contig_chunks: number of physically contiguous chunks
 * @prot: protection flags for the window
 * @ref_count: reference count, incremented/decremented in page units
 *             (see scif_get_window()/scif_put_window())
 * @magic: sanity-check cookie
 * @offset: registered offset of the window
 * @va_for_temp: starting virtual address for temporary windows
 * @dma_mark: DMA mark associated with this window
 * @ep: owning endpoint, stored as a u64 (not a pointer) so the struct
 *      layout is identical on both nodes
 * @list: link on the endpoint's registration list
 * @type: window type (see enum scif_window_type)
 * @peer_window: opaque handle identifying the peer's matching window
 * @unreg_state: message state of an in-flight unregistration
 * @offset_freed: true once @offset has been returned to the allocator
 * @temp: true for temporary windows
 * @mm: mm_struct the pages were pinned from
 * @st: scatterlist table for the window's pages
 *
 * The anonymous union carries side-specific state: the first struct
 * is used by self windows (pinned pages plus the registration
 * handshake wait queues/states), the second by remote windows (DMA
 * address and page-count lookup tables).
 *
 * NOTE(review): the struct is __packed, presumably because it is
 * transferred verbatim between nodes — do not reorder fields.
 */
struct scif_window {
	s64 nr_pages;
	int nr_contig_chunks;
	int prot;
	int ref_count;
	u64 magic;
	s64 offset;
	unsigned long va_for_temp;
	int dma_mark;
	u64 ep;
	struct list_head list;
	enum scif_window_type type;
	u64 peer_window;
	enum scif_msg_state unreg_state;
	bool offset_freed;
	bool temp;
	struct mm_struct *mm;
	struct sg_table *st;
	union {
		struct {
			struct scif_pinned_pages *pinned_pages;
			struct scif_allocmsg alloc_handle;
			wait_queue_head_t regwq;
			enum scif_msg_state reg_state;
			wait_queue_head_t unregwq;
		};
		struct {
			struct scif_rma_lookup dma_addr_lookup;
			struct scif_rma_lookup num_pages_lookup;
			int nr_lookup;
			dma_addr_t mapped_offset;
		};
	};
	dma_addr_t *dma_addr;
	u64 *num_pages;
} __packed;
291
292
293
294
295
296
297
298
299
300
/**
 * struct scif_mmu_notif - per-mm MMU notifier registration
 * @ep_mmu_notifier: the MMU notifier (only when CONFIG_MMU_NOTIFIER)
 * @tc_reg_list: temporary cached windows registered under this mm
 * @mm: address space being watched
 * @ep: owning endpoint
 * @list: link on the endpoint's mmn_list
 */
struct scif_mmu_notif {
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier ep_mmu_notifier;
#endif
	struct list_head tc_reg_list;
	struct mm_struct *mm;
	struct scif_endpt *ep;
	struct list_head list;
};
310
/* Direction of an RMA transfer relative to the local node */
enum scif_rma_dir {
	SCIF_LOCAL_TO_REMOTE,
	SCIF_REMOTE_TO_LOCAL
};
315
/* Slab cache for unaligned-copy bounce buffers
 * (objects sized SCIF_KMEM_UNALIGNED_BUF_SIZE)
 */
extern struct kmem_cache *unaligned_cache;

/* Endpoint RMA state setup / teardown */
void scif_rma_ep_init(struct scif_endpt *ep);

int scif_rma_ep_can_uninit(struct scif_endpt *ep);

/* Registration offset allocation and release */
int scif_get_window_offset(struct scif_endpt *ep, int flags,
			   s64 offset, int nr_pages, s64 *out_offset);

void scif_free_window_offset(struct scif_endpt *ep,
			     struct scif_window *window, s64 offset);

/* Window lifecycle: create, destroy, map, unmap, unregister */
struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
				       s64 offset, bool temp);

int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window);
void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window);

int scif_map_window(struct scif_dev *remote_dev,
		    struct scif_window *window);

int scif_unregister_window(struct scif_window *window);

void
scif_destroy_remote_window(struct scif_window *window);

/* Per-node mmap zapping and zombie cleanup */
void scif_zap_mmaps(int node);

bool scif_rma_do_apps_have_mmaps(int node);

void scif_cleanup_rma_for_zombies(int node);

int scif_reserve_dma_chan(struct scif_endpt *ep);

/* Fence/signal primitives */
int _scif_fence_mark(scif_epd_t epd, int *mark);
int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
		     enum scif_window_type type);
/*
 * Message handlers for RMA-related SCIF messages.
 * NOTE(review): presumably dispatched from the node QP message loop;
 * confirm the dispatch site.
 */
void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_munmap(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg);
/* Deferred-work entry points */
void scif_mmu_notif_handler(struct work_struct *work);
void scif_rma_handle_remote_fences(void);
void scif_rma_destroy_windows(void);
void scif_rma_destroy_tcw_invalid(void);
int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan);
375
/**
 * struct scif_window_iter - cursor for iterating a window's pages
 * @offset: current offset within the window
 * @index: current index into the window's per-page arrays
 */
struct scif_window_iter {
	s64 offset;
	int index;
};
380
381static inline void
382scif_init_window_iter(struct scif_window *window, struct scif_window_iter *iter)
383{
384 iter->offset = window->offset;
385 iter->index = 0;
386}
387
/*
 * Translate window offset @off into a DMA address.
 * NOTE(review): @nr_bytes and @iter appear to be optional out/cache
 * parameters (NULL is passed below); confirm semantics in the .c file.
 */
dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
				size_t *nr_bytes,
				struct scif_window_iter *iter);
/* Convenience wrapper: translate @off without a byte count or iterator. */
static inline
dma_addr_t __scif_off_to_dma_addr(struct scif_window *window, s64 off)
{
	return scif_off_to_dma_addr(window, off, NULL, NULL);
}
396
397static inline bool scif_unaligned(off_t src_offset, off_t dst_offset)
398{
399 src_offset = src_offset & (L1_CACHE_BYTES - 1);
400 dst_offset = dst_offset & (L1_CACHE_BYTES - 1);
401 return !(src_offset == dst_offset);
402}
403
404
405
406
407
408
409
410
411
/*
 * scif_zalloc() - allocate @size bytes, rounded up to a whole number
 * of pages and zero-filled.
 *
 * Tries physically contiguous pages first when the rounded size is
 * non-zero and its order is below MAX_ORDER; otherwise (or on
 * failure) falls back to vzalloc(). Release with scif_free(), which
 * selects the matching free path. Returns NULL on failure.
 */
static inline void *scif_zalloc(size_t size)
{
	void *ret = NULL;
	size_t align = ALIGN(size, PAGE_SIZE);

	if (align && get_order(align) < MAX_ORDER)
		ret = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(align));
	return ret ? ret : vzalloc(align);
}
422
423
424
425
426
427
428
/*
 * scif_free() - release memory obtained from scif_zalloc().
 *
 * @size must match the size passed at allocation so the page order is
 * recomputed identically. is_vmalloc_addr() distinguishes the
 * vzalloc() fallback from the contiguous-pages path.
 */
static inline void scif_free(void *addr, size_t size)
{
	size_t align = ALIGN(size, PAGE_SIZE);

	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		free_pages((unsigned long)addr, get_order(align));
}
438
439static inline void scif_get_window(struct scif_window *window, int nr_pages)
440{
441 window->ref_count += nr_pages;
442}
443
444static inline void scif_put_window(struct scif_window *window, int nr_pages)
445{
446 window->ref_count -= nr_pages;
447}
448
/* Initialize @window's reference count to @nr_pages (one ref per page). */
static inline void scif_set_window_ref(struct scif_window *window, int nr_pages)
{
	window->ref_count = nr_pages;
}
453
/*
 * Queue @window on the global cleanup @list under scif_info.rmalock,
 * then kick scif_info.misc_work so the destruction happens
 * asynchronously in workqueue context.
 */
static inline void
scif_queue_for_cleanup(struct scif_window *window, struct list_head *list)
{
	spin_lock(&scif_info.rmalock);
	list_add_tail(&window->list, list);
	spin_unlock(&scif_info.rmalock);
	schedule_work(&scif_info.misc_work);
}
462
/*
 * Unlink @window from whatever list it is currently on and queue it
 * on scif_info.rma_tc for deferred destruction.
 */
static inline void __scif_rma_destroy_tcw_helper(struct scif_window *window)
{
	list_del_init(&window->list);
	scif_queue_for_cleanup(window, &scif_info.rma_tc);
}
468
/*
 * Return true when the Intel IOMMU is both compiled in and enabled at
 * runtime; always false when CONFIG_INTEL_IOMMU is not set.
 */
static inline bool scif_is_iommu_enabled(void)
{
#ifdef CONFIG_INTEL_IOMMU
	return intel_iommu_enabled;
#else
	return false;
#endif
}
477#endif
478