/*
 * SCIF (Symmetric Communications Interface) Remote Memory Access (RMA)
 * definitions: registered-window bookkeeping, fence bookkeeping, and the
 * RMA helper API shared by the SCIF driver.
 *
 * NOTE(review): the original license/copyright header was lost in
 * extraction (only bare line numbers remained here); restore the
 * upstream Intel MIC Platform Software Stack GPL-2.0 header before
 * shipping.
 */
#ifndef SCIF_RMA_H
#define SCIF_RMA_H

#include <linux/dma_remapping.h>
#include <linux/mmu_notifier.h>

#include "../bus/scif_bus.h"

/*
 * Bit 31 of a fence mark distinguishes a fence against the remote node
 * from a purely local one.
 */
#define SCIF_REMOTE_FENCE_BIT 31
/* Mask form of the remote-fence flag. */
#define SCIF_REMOTE_FENCE BIT_ULL(SCIF_REMOTE_FENCE_BIT)

/*
 * Maximum transfer size handled via the unaligned-buffer path (1 MB).
 * The kmem-cache buffer adds two cache lines of slack, presumably for
 * head/tail alignment of the copy — confirm against the DMA code that
 * uses unaligned_cache.
 */
#define SCIF_MAX_UNALIGNED_BUF_SIZE (1024 * 1024ULL)
#define SCIF_KMEM_UNALIGNED_BUF_SIZE (SCIF_MAX_UNALIGNED_BUF_SIZE + \
				      (L1_CACHE_BYTES << 1))

/* IOVA-domain bounds (in PFN units) used for window offset allocation. */
#define SCIF_IOVA_START_PFN (1)
#define SCIF_IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define SCIF_DMA_64BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(64))
#define SCIF_DMA_63BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(63))
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
/**
 * struct scif_endpt_rma_info - per-endpoint RMA state
 * @reg_list: list of windows registered by this endpoint
 * @remote_reg_list: list of windows registered by the peer endpoint
 * @iovad: IOVA domain used to allocate window offsets
 * @rma_lock: serializes access to the registration lists
 * @tc_lock: spinlock protecting the temporary/cached window state
 * @mmn_lock: serializes access to @mmn_list
 * @tw_refcount: outstanding temporary windows
 * @tcw_refcount: outstanding temporary cached windows
 * @tcw_total_pages: total pages pinned by temporary cached windows
 * @mmn_list: MMU-notifier registrations for this endpoint
 * @fence_refcount: outstanding fence operations
 * @dma_chan: DMA channel reserved for this endpoint's transfers
 * @async_list_del: flags an asynchronous list deletion in progress
 *                  (exact protocol lives in the .c files — verify there)
 * @vma_list: VMAs that currently mmap this endpoint's windows
 * @markwq: waitqueue used by the fence mark/wait machinery
 */
struct scif_endpt_rma_info {
	struct list_head reg_list;
	struct list_head remote_reg_list;
	struct iova_domain iovad;
	struct mutex rma_lock;
	spinlock_t tc_lock;
	struct mutex mmn_lock;
	atomic_t tw_refcount;
	atomic_t tcw_refcount;
	atomic_t tcw_total_pages;
	struct list_head mmn_list;
	atomic_t fence_refcount;
	struct dma_chan *dma_chan;
	int async_list_del;
	struct list_head vma_list;
	wait_queue_head_t markwq;
};
116
117
118
119
120
121
122
123
/**
 * struct scif_fence_info - tracks a local fence request
 * @state: message-handshake state of the fence
 * @comp: completion signalled when the fence is done
 * @dma_mark: DMA mark the fence waits on
 */
struct scif_fence_info {
	enum scif_msg_state state;
	struct completion comp;
	int dma_mark;
};
129
130
131
132
133
134
135
/**
 * struct scif_remote_fence_info - queued fence request from the peer
 * @msg: the fence request message as received
 * @list: link on the list of pending remote fences
 */
struct scif_remote_fence_info {
	struct scifmsg msg;
	struct list_head list;
};
140
141
142
143
144
145
146
/*
 * Window classification. SELF/PEER say which side registered the window;
 * PARTIAL/SINGLE/FULL appear to describe how much of a window an
 * operation covers — confirm against their users in the .c files.
 */
enum scif_window_type {
	SCIF_WINDOW_PARTIAL,
	SCIF_WINDOW_SINGLE,
	SCIF_WINDOW_FULL,
	SCIF_WINDOW_SELF,
	SCIF_WINDOW_PEER
};
154
155
156#define SCIF_NR_ADDR_IN_PAGE (0x1000 >> 3)
157
158
159
160
161
162
163
164
165
166
167
/**
 * struct scif_rma_lookup - lookup table for remote window metadata
 * @lookup: array of DMA addresses, one per lookup page
 * @offset: DMA address of the @lookup array itself
 */
struct scif_rma_lookup {
	dma_addr_t *lookup;
	dma_addr_t offset;
};
172
173
174
175
176
177
178
179
180
181
182
183
184
185
/**
 * struct scif_pinned_pages - set of pinned user pages backing a window
 * @nr_pages: number of pages pinned
 * @prot: protection flags requested at pin time
 * @map_flags: mapping flags requested at pin time
 * @ref_count: users of this pinned set
 * @magic: sanity-check cookie
 * @pages: the pinned page pointers
 */
struct scif_pinned_pages {
	s64 nr_pages;
	int prot;
	int map_flags;
	atomic_t ref_count;
	u64 magic;
	struct page **pages;
};
194
195
196
197
198
199
200
201
/**
 * struct scif_status - context for a DMA status/signal write
 * @src_dma_addr: DMA address of the source value
 * @val: value to be written
 * @ep: endpoint on whose behalf the write is performed
 */
struct scif_status {
	dma_addr_t src_dma_addr;
	u64 val;
	struct scif_endpt *ep;
};
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
/**
 * struct scif_window - a registered RMA window (self or peer view)
 * @nr_pages: size of the window in pages
 * @nr_contig_chunks: number of physically contiguous runs in the window
 * @prot: protection flags
 * @ref_count: page-granularity reference count (see scif_get/put_window)
 * @magic: sanity-check cookie
 * @offset: offset of the window in the endpoint's RMA address space
 * @va_for_temp: user virtual address backing a temporary window
 * @dma_mark: mark used to wait out DMAs pending against this window
 * @ep: owning endpoint, stored as a u64 — presumably so it can travel
 *      inside messages; confirm against the message handlers
 * @list: link on the endpoint's registration list
 * @type: see enum scif_window_type
 * @peer_window: opaque handle identifying the peer's counterpart window
 * @unreg_state: unregistration handshake state
 * @offset_freed: set once @offset has been returned to the IOVA domain
 * @temp: true for a temporary (cached-registration) window
 * @mm: mm_struct owning @va_for_temp
 * @st: scatterlist table when the window is IOMMU-mapped
 *
 * The anonymous union holds self-window state (pinned pages, the
 * allocation handshake, and reg/unreg waitqueues) or peer-window state
 * (remote lookup tables and the mapped offset). The struct is __packed;
 * do not reorder fields.
 */
struct scif_window {
	s64 nr_pages;
	int nr_contig_chunks;
	int prot;
	int ref_count;
	u64 magic;
	s64 offset;
	unsigned long va_for_temp;
	int dma_mark;
	u64 ep;
	struct list_head list;
	enum scif_window_type type;
	u64 peer_window;
	enum scif_msg_state unreg_state;
	bool offset_freed;
	bool temp;
	struct mm_struct *mm;
	struct sg_table *st;
	union {
		struct {
			/* self-window state */
			struct scif_pinned_pages *pinned_pages;
			struct scif_allocmsg alloc_handle;
			wait_queue_head_t regwq;
			enum scif_msg_state reg_state;
			wait_queue_head_t unregwq;
		};
		struct {
			/* peer-window state */
			struct scif_rma_lookup dma_addr_lookup;
			struct scif_rma_lookup num_pages_lookup;
			int nr_lookup;
			dma_addr_t mapped_offset;
		};
	};
	/* per-chunk DMA addresses and page counts */
	dma_addr_t *dma_addr;
	u64 *num_pages;
} __packed;
278
279
280
281
282
283
284
285
286
287
/**
 * struct scif_mmu_notif - MMU-notifier registration for an endpoint
 * @ep_mmu_notifier: the notifier registered with the MM (if enabled)
 * @tc_reg_list: temporary cached windows tracked under this notifier
 * @mm: the mm_struct being watched
 * @ep: owning endpoint
 * @list: link on the endpoint's mmn_list
 */
struct scif_mmu_notif {
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier ep_mmu_notifier;
#endif
	struct list_head tc_reg_list;
	struct mm_struct *mm;
	struct scif_endpt *ep;
	struct list_head list;
};
297
/* Direction of an RMA transfer relative to the local node. */
enum scif_rma_dir {
	SCIF_LOCAL_TO_REMOTE,
	SCIF_REMOTE_TO_LOCAL
};
302
/* kmem cache backing the unaligned-transfer bounce buffers. */
extern struct kmem_cache *unaligned_cache;

/* Endpoint RMA lifecycle. */
void scif_rma_ep_init(struct scif_endpt *ep);

int scif_rma_ep_can_uninit(struct scif_endpt *ep);

/* Window offset (IOVA) allocation and release. */
int scif_get_window_offset(struct scif_endpt *ep, int flags,
			   s64 offset, int nr_pages, s64 *out_offset);

void scif_free_window_offset(struct scif_endpt *ep,
			     struct scif_window *window, s64 offset);

/* Window creation, mapping, and teardown. */
struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
				       s64 offset, bool temp);

int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window);
void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window);

int scif_map_window(struct scif_dev *remote_dev,
		    struct scif_window *window);

int scif_unregister_window(struct scif_window *window);

void
scif_destroy_remote_window(struct scif_window *window);

/* Node shutdown / zombie cleanup helpers. */
void scif_zap_mmaps(int node);

bool scif_rma_do_apps_have_mmaps(int node);

void scif_cleanup_rma_for_zombies(int node);

int scif_reserve_dma_chan(struct scif_endpt *ep);

/* Fence and signal support. */
int _scif_fence_mark(scif_epd_t epd, int *mark);
int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
		     enum scif_window_type type);

/* Message handlers invoked by the SCIF node-QP dispatch code. */
void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_munmap(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg);
void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg);

/* Deferred/background work. */
void scif_mmu_notif_handler(struct work_struct *work);
void scif_rma_handle_remote_fences(void);
void scif_rma_destroy_windows(void);
void scif_rma_destroy_tcw_invalid(void);
int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan);
362
/**
 * struct scif_window_iter - cursor for walking a window's chunks
 * @offset: current offset within the window
 * @index: current chunk index
 */
struct scif_window_iter {
	s64 offset;
	int index;
};
367
368static inline void
369scif_init_window_iter(struct scif_window *window, struct scif_window_iter *iter)
370{
371 iter->offset = window->offset;
372 iter->index = 0;
373}
374
/*
 * Translate window offset @off to its DMA address; optionally reports the
 * contiguous byte count at *nr_bytes and resumes from @iter.
 */
dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
				size_t *nr_bytes,
				struct scif_window_iter *iter);
/* One-shot translation without byte count or iterator state. */
static inline
dma_addr_t __scif_off_to_dma_addr(struct scif_window *window, s64 off)
{
	return scif_off_to_dma_addr(window, off, NULL, NULL);
}
383
384static inline bool scif_unaligned(off_t src_offset, off_t dst_offset)
385{
386 src_offset = src_offset & (L1_CACHE_BYTES - 1);
387 dst_offset = dst_offset & (L1_CACHE_BYTES - 1);
388 return !(src_offset == dst_offset);
389}
390
391
392
393
394
395
396
397
398
399static inline void *scif_zalloc(size_t size)
400{
401 void *ret = NULL;
402 size_t align = ALIGN(size, PAGE_SIZE);
403
404 if (align && get_order(align) < MAX_ORDER)
405 ret = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
406 get_order(align));
407 return ret ? ret : vzalloc(align);
408}
409
410
411
412
413
414
415
416static inline void scif_free(void *addr, size_t size)
417{
418 size_t align = ALIGN(size, PAGE_SIZE);
419
420 if (is_vmalloc_addr(addr))
421 vfree(addr);
422 else
423 free_pages((unsigned long)addr, get_order(align));
424}
425
426static inline void scif_get_window(struct scif_window *window, int nr_pages)
427{
428 window->ref_count += nr_pages;
429}
430
431static inline void scif_put_window(struct scif_window *window, int nr_pages)
432{
433 window->ref_count -= nr_pages;
434}
435
/* Initialize @window's reference count to @nr_pages (one ref per page). */
static inline void scif_set_window_ref(struct scif_window *window, int nr_pages)
{
	window->ref_count = nr_pages;
}
440
/*
 * Queue @window on a global cleanup @list and kick the misc workqueue to
 * destroy it asynchronously. The list_add must happen under rmalock so
 * the worker never observes a half-linked entry.
 */
static inline void
scif_queue_for_cleanup(struct scif_window *window, struct list_head *list)
{
	spin_lock(&scif_info.rmalock);
	list_add_tail(&window->list, list);
	spin_unlock(&scif_info.rmalock);
	schedule_work(&scif_info.misc_work);
}
449
/*
 * Unlink a temporary cached window from its current list and hand it to
 * the asynchronous cleanup machinery via the global rma_tc list.
 */
static inline void __scif_rma_destroy_tcw_helper(struct scif_window *window)
{
	list_del_init(&window->list);
	scif_queue_for_cleanup(window, &scif_info.rma_tc);
}
455
/*
 * True when the Intel IOMMU is compiled in and active at runtime;
 * always false on kernels built without CONFIG_INTEL_IOMMU.
 */
static inline bool scif_is_iommu_enabled(void)
{
#ifdef CONFIG_INTEL_IOMMU
	return intel_iommu_enabled;
#else
	return false;
#endif
}
464#endif
465