/*
 * grant_table.h
 *
 * Interface for granting other domains access to our memory reservation
 * and for accessing other domains' memory via grant references.
 */
#ifndef __ASM_GNTTAB_H__
#define __ASM_GNTTAB_H__

#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/page.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/kernel.h>

#define GNTTAB_RESERVED_XENSTORE 1

/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4

struct gnttab_free_callback {
	struct gnttab_free_callback *next;
	void (*fn)(void *);
	void *arg;
	u16 count;
};

struct gntab_unmap_queue_data;

typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);

struct gntab_unmap_queue_data
{
	struct delayed_work gnttab_work;
	void *data;
	gnttab_unmap_refs_done done;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_unmap_grant_ref *kunmap_ops;
	struct page **pages;
	unsigned int count;
	unsigned int age;
};

int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);

/*
 * Grant @domid access to the page at @frame.  Returns the new grant
 * reference on success or a negative errno on failure.
 */
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly);

/*
 * End access through the given grant reference, iff the grant entry is
 * no longer in use.  Return 1 if the grant entry was freed, 0 if it is
 * still in use.
 */
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);

/*
 * Eventually end access through the given grant reference, and once that
 * access has been ended, free the given page too.  Access will be ended
 * immediately iff the grant entry is not in use, otherwise it will happen
 * some time later.  @page may be 0, in which case no freeing will occur.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page);
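
/*
 * Illustrative sketch (not part of this header's API): share one Linux page
 * read-only with a backend domain and later revoke the grant.  "page" and
 * "otherend_id" are placeholders supplied by the caller.
 *
 *	int ref;
 *
 *	ref = gnttab_grant_foreign_access(otherend_id,
 *					  xen_page_to_gfn(page), 1);
 *	if (ref < 0)
 *		return ref;
 *	...
 *	gnttab_end_foreign_access(ref, 1, 0);
 *
 * Passing 0 as the page argument means the page itself is not freed here.
 */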

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);

int gnttab_query_foreign_access(grant_ref_t ref);

/*
 * operations on reserved batches of grant references
 */
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);

void gnttab_free_grant_reference(grant_ref_t ref);

void gnttab_free_grant_references(grant_ref_t head);

int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);

int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release);

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);

void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly);
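
/*
 * Illustrative sketch (placeholder names "head", "ref", "otherend_id" and
 * "page"): pre-allocate a small batch of grant references, claim one per
 * request and fill it in with gnttab_grant_foreign_access_ref(), then give
 * everything back once the other end is done with it.
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(16, &head) < 0)
 *		return -ENOSPC;
 *
 *	ref = gnttab_claim_grant_reference(&head);
 *	if (ref < 0)
 *		return ref;
 *	gnttab_grant_foreign_access_ref(ref, otherend_id,
 *					xen_page_to_gfn(page), 0);
 *	...
 *	if (gnttab_end_foreign_access_ref(ref, 0))
 *		gnttab_release_grant_reference(&head, ref);
 *	gnttab_free_grant_references(head);
 */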

/* Give access to the first Xen page (XEN_PAGE_SIZE bytes) of the Linux page */
static inline void gnttab_page_grant_foreign_access_ref_one(
	grant_ref_t ref, domid_t domid,
	struct page *page, int readonly)
{
	gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
					readonly);
}

void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
				       unsigned long pfn);

static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
		  uint32_t flags, grant_ref_t ref, domid_t domid)
{
	if (flags & GNTMAP_contains_pte)
		map->host_addr = addr;
	else if (xen_feature(XENFEAT_auto_translated_physmap))
		map->host_addr = __pa(addr);
	else
		map->host_addr = addr;

	map->flags = flags;
	map->ref = ref;
	map->dom = domid;
}

static inline void
gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
		    uint32_t flags, grant_handle_t handle)
{
	if (flags & GNTMAP_contains_pte)
		unmap->host_addr = addr;
	else if (xen_feature(XENFEAT_auto_translated_physmap))
		unmap->host_addr = __pa(addr);
	else
		unmap->host_addr = addr;

	unmap->handle = handle;
	unmap->dev_bus_addr = 0;
}
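
/*
 * Illustrative sketch (placeholder names "vaddr", "gref" and "otherend_id"):
 * fill in a map/unmap pair for a grant shared by another domain.  vaddr is a
 * kernel virtual address reserved by the caller for the mapping.
 *
 *	struct gnttab_map_grant_ref map;
 *	struct gnttab_unmap_grant_ref unmap;
 *
 *	gnttab_set_map_op(&map, (phys_addr_t)(unsigned long)vaddr,
 *			  GNTMAP_host_map, gref, otherend_id);
 *	... submit the map (e.g. via gnttab_batch_map()) and check that
 *	    map.status == GNTST_okay ...
 *	gnttab_set_unmap_op(&unmap, (phys_addr_t)(unsigned long)vaddr,
 *			    GNTMAP_host_map, map.handle);
 */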

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

struct grant_frames {
	xen_pfn_t *pfn;
	unsigned int count;
	void *vaddr;
};
extern struct grant_frames xen_auto_xlat_grant_frames;
unsigned int gnttab_max_grant_frames(void);
int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
void gnttab_free_auto_xlat_frames(void);

#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))

int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args {
	/* Device for which DMA memory will be/was allocated. */
	struct device *dev;
	/* If set then DMA buffer is coherent and write-combine otherwise. */
	bool coherent;

	int nr_pages;
	struct page **pages;
	xen_pfn_t *frames;
	void *vaddr;
	dma_addr_t dev_bus_addr;
};

int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
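
/*
 * Illustrative sketch (placeholder names "dev", "nr", "pages" and "frames",
 * with pages/frames sized for nr entries): allocate DMA-able pages that can
 * then be granted to another domain.
 *
 *	struct gnttab_dma_alloc_args args = {
 *		.dev      = dev,
 *		.coherent = true,
 *		.nr_pages = nr,
 *		.pages    = pages,
 *		.frames   = frames,
 *	};
 *
 *	if (gnttab_dma_alloc_pages(&args))
 *		return -ENOMEM;
 *	... use args.vaddr / args.dev_bus_addr, grant args.frames[i] ...
 *	gnttab_dma_free_pages(&args);
 */
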
#endif

int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
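
/*
 * Illustrative sketch (placeholder names "item" -- a long-lived
 * struct gntab_unmap_queue_data -- "unmap_done", "unmap_ops", "pages" and
 * "count"): queue a prepared unmap batch and get a callback once the
 * hypervisor has let go of every page.  The queue data must stay alive
 * until the done callback has run.
 *
 *	static void unmap_done(int result, struct gntab_unmap_queue_data *q)
 *	{
 *		gnttab_free_pages(q->count, q->pages);
 *	}
 *
 *	item->unmap_ops  = unmap_ops;
 *	item->kunmap_ops = NULL;
 *	item->pages      = pages;
 *	item->count      = count;
 *	item->done       = unmap_done;
 *	gnttab_unmap_refs_async(item);
 */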

/*
 * Perform a batch of grant map/copy operations and retry every batch slot
 * for which the hypervisor returns GNTST_eagain (typically because the
 * target frame is paged out).  On return, no status field in the batch is
 * still GNTST_eagain.
 */
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
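
/*
 * Illustrative sketch (placeholder names "gref", "otherend_id" and "page"):
 * copy one Xen page out of a foreign grant with a single-element batch.
 *
 *	struct gnttab_copy op = {
 *		.flags        = GNTCOPY_source_gref,
 *		.source.u.ref = gref,
 *		.source.domid = otherend_id,
 *		.dest.u.gmfn  = xen_page_to_gfn(page),
 *		.dest.domid   = DOMID_SELF,
 *		.len          = XEN_PAGE_SIZE,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		return -EIO;
 */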

struct xen_page_foreign {
	domid_t domid;
	grant_ref_t gref;
};

static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
{
	if (!PageForeign(page))
		return NULL;
#if BITS_PER_LONG < 64
	return (struct xen_page_foreign *)page->private;
#else
	BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
	return (struct xen_page_foreign *)&page->private;
#endif
}
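
/*
 * Illustrative sketch (placeholder name "page"): find out which domain
 * granted a page that was mapped with gnttab_map_refs().
 *
 *	struct xen_page_foreign *foreign = xen_page_foreign(page);
 *
 *	if (foreign)
 *		pr_debug("page granted by dom%u (gref %u)\n",
 *			 foreign->domid, foreign->gref);
 */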

/*
 * Callback type used when iterating over a buffer in grant-sized
 * (XEN_PAGE_SIZE) chunks.
 *
 * Parameters of fn:
 *	gfn: guest frame number backing the chunk
 *	offset: offset of the chunk within that frame
 *	len: length of the data in the chunk
 *	data: opaque pointer passed through by the caller
 */
typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
			       unsigned int len, void *data);

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data);

/* Helper to call fn only on the first grant-sized chunk of the range */
static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
					unsigned len, xen_grant_fn_t fn,
					void *data)
{
	/* Clamp len so the chunk does not cross a XEN_PAGE_SIZE boundary */
	len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
		    len);

	gnttab_foreach_grant_in_range(page, offset, len, fn, data);
}

/* Call fn on nr_grefs grant-sized chunks taken in order from the given pages */
void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data);

/*
 * Get the number of grants needed for a region.
 *
 * start: offset from the beginning of the first page
 * len: total length of the data (may cross multiple Xen pages)
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
					      unsigned int len)
{
	return XEN_PFN_UP(xen_offset_in_page(start) + len);
}
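
/*
 * Illustrative sketch (placeholder names "page", "offset", "len", "ctx" and
 * a caller-provided xen_grant_fn_t callback "setup_one_gref"): size a grant
 * reference array for a buffer within one Linux page and then visit every
 * grant-sized chunk of it.
 *
 *	unsigned int nr_refs = gnttab_count_grant(offset, len);
 *
 *	... allocate nr_refs grant references, e.g. with
 *	    gnttab_alloc_grant_references() ...
 *	gnttab_foreach_grant_in_range(page, offset, len,
 *				      setup_one_gref, ctx);
 */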

#endif /* __ASM_GNTTAB_H__ */