1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#ifndef __ASM_GNTTAB_H__
38#define __ASM_GNTTAB_H__
39
40#include <asm/page.h>
41
42#include <xen/interface/xen.h>
43#include <xen/interface/grant_table.h>
44
45#include <asm/xen/hypervisor.h>
46
47#include <xen/features.h>
48#include <xen/page.h>
49#include <linux/mm_types.h>
50#include <linux/page-flags.h>
51#include <linux/kernel.h>
52
/* Grant reference reserved for the xenstore ring. */
#define GNTTAB_RESERVED_XENSTORE 1


/* Default number of grant-table frames -- TODO confirm this matches the limit configured in Xen. */
#define NR_GRANT_FRAMES 4

/*
 * Callback registered with gnttab_request_free_callback(); presumably run
 * once at least @count grant references become free -- verify against the
 * grant-table core.
 */
struct gnttab_free_callback {
	struct gnttab_free_callback *next;	/* link in the callback chain */
	void (*fn)(void *);			/* function to invoke */
	void *arg;				/* opaque argument passed to @fn */
	u16 count;				/* number of references wanted */
};
64
struct gntab_unmap_queue_data;

/* Completion callback for gnttab_unmap_refs_async(); @result is the unmap status. */
typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);

/*
 * State for an asynchronous batched unmap (gnttab_unmap_refs_async() /
 * gnttab_unmap_refs_sync()).
 */
struct gntab_unmap_queue_data
{
	struct delayed_work	gnttab_work;	/* deferred work driving the operation */
	void *data;				/* caller-private context */
	gnttab_unmap_refs_done	done;		/* invoked when the unmap completes */
	struct gnttab_unmap_grant_ref *unmap_ops;	/* user-space unmap ops */
	struct gnttab_unmap_grant_ref *kunmap_ops;	/* kernel-mapping unmap ops */
	struct page **pages;			/* pages the ops refer to */
	unsigned int count;			/* entries in the arrays above */
	unsigned int age;			/* retry bookkeeping -- TODO confirm exact use */
};
80
int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);

/*
 * Grant @domid access to @frame; a non-zero @readonly makes the grant
 * read-only.  NOTE(review): expected to return the allocated grant
 * reference or a negative errno -- confirm against the grant-table core.
 */
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly);

/*
 * End access through the given grant reference.  NOTE(review): presumably
 * reports whether the entry could be released -- confirm the exact return
 * convention against the implementation.
 */
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);

/*
 * End access through @ref and free @page once access has ended.
 * NOTE(review): freeing may be deferred while the remote domain still has
 * the grant mapped, and @page may be 0 -- verify both in the core.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page);

/* Offer @pfn for transfer to @domid. */
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);

/* Presumably non-zero while the remote domain still uses @ref -- verify. */
int gnttab_query_foreign_access(grant_ref_t ref);
110
111
112
113
/*
 * Reserve @count grant references into a private pool; the pool head is
 * returned through @pprivate_head.  NOTE(review): presumably 0 on success,
 * negative errno on failure -- confirm in the grant-table core.
 */
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);

/* Return a single grant reference to the global free list. */
void gnttab_free_grant_reference(grant_ref_t ref);

/* Return a whole private pool (by its head) to the global free list. */
void gnttab_free_grant_references(grant_ref_t head);

/* Non-zero if the private pool at @pprivate_head has no references left. */
int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);

/* Take one reference out of the private pool at @pprivate_head. */
int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);

/* Put @release back into the private pool at @private_head. */
void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release);

/* Register/cancel a callback to run once @count grant references are free. */
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);

/* Fill in an already-claimed reference @ref (no allocation performed). */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly);
133
134
135static inline void gnttab_page_grant_foreign_access_ref_one(
136 grant_ref_t ref, domid_t domid,
137 struct page *page, int readonly)
138{
139 gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
140 readonly);
141}
142
/* Fill in an already-claimed transfer reference for @pfn (no allocation). */
void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
				       unsigned long pfn);
145
146static inline void
147gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
148 uint32_t flags, grant_ref_t ref, domid_t domid)
149{
150 if (flags & GNTMAP_contains_pte)
151 map->host_addr = addr;
152 else if (xen_feature(XENFEAT_auto_translated_physmap))
153 map->host_addr = __pa(addr);
154 else
155 map->host_addr = addr;
156
157 map->flags = flags;
158 map->ref = ref;
159 map->dom = domid;
160}
161
162static inline void
163gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
164 uint32_t flags, grant_handle_t handle)
165{
166 if (flags & GNTMAP_contains_pte)
167 unmap->host_addr = addr;
168 else if (xen_feature(XENFEAT_auto_translated_physmap))
169 unmap->host_addr = __pa(addr);
170 else
171 unmap->host_addr = addr;
172
173 unmap->handle = handle;
174 unmap->dev_bus_addr = 0;
175}
176
/* Architecture hooks for mapping the shared grant and status frames. */
int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

/* Pre-allocated grant frames used when running auto-translated. */
struct grant_frames {
	xen_pfn_t *pfn;		/* array of frame numbers */
	unsigned int count;	/* number of entries in @pfn */
	void *vaddr;		/* kernel mapping of the frames */
};
extern struct grant_frames xen_auto_xlat_grant_frames;
unsigned int gnttab_max_grant_frames(void);
int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
void gnttab_free_auto_xlat_frames(void);

/* Kernel virtual address of a completed map op. */
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))

/* Allocate/free batches of pages suitable for granting to other domains. */
int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);
200
/*
 * Small cache of grantable pages, protected by @lock.  The backing store
 * depends on CONFIG_XEN_UNPOPULATED_ALLOC: a chained struct page list
 * versus a regular list_head.
 */
struct gnttab_page_cache {
	spinlock_t lock;		/* protects @pages and @num_pages */
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
	struct page *pages;		/* chain of cached pages */
#else
	struct list_head pages;		/* list of cached pages */
#endif
	unsigned int num_pages;		/* pages currently cached */
};

void gnttab_page_cache_init(struct gnttab_page_cache *cache);
/* Take one page from the cache; NOTE(review): presumably non-zero when empty -- verify. */
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
/* Return @num pages to the cache. */
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num);
/* Release cached pages; @num presumably bounds the shrink -- verify in core. */
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
			      unsigned int num);
217
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/* Arguments for gnttab_dma_alloc_pages() / gnttab_dma_free_pages(). */
struct gnttab_dma_alloc_args {
	/* Device for which the DMA memory is allocated. */
	struct device *dev;
	/* If set, allocate DMA-coherent memory. */
	bool coherent;

	int nr_pages;			/* number of pages to allocate */
	struct page **pages;		/* allocated page pointers */
	xen_pfn_t *frames;		/* corresponding frame numbers */
	void *vaddr;			/* kernel virtual address of the buffer */
	dma_addr_t dev_bus_addr;	/* device bus address of the buffer */
};

int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
#endif

/* Set/clear per-page private state on a batch of grantable pages. */
int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);
238
/*
 * Map/unmap batches of grant references onto @pages.  @kmap_ops /
 * @kunmap_ops are the optional kernel-mapping counterparts of the ops.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count);
/* Asynchronous unmap; @item->done is invoked on completion. */
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
/* Synchronous wrapper around the async unmap path. */
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);

/*
 * Perform a batch of map/copy grant-table operations.  NOTE(review):
 * presumably retries entries the hypervisor asks to retry -- confirm
 * against the implementation.
 */
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
260
261
/* Record of which foreign domain/grant a foreign-mapped page came from. */
struct xen_page_foreign {
	domid_t domid;		/* owning (foreign) domain */
	grant_ref_t gref;	/* grant reference within that domain */
};
266
267static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
268{
269 if (!PageForeign(page))
270 return NULL;
271#if BITS_PER_LONG < 64
272 return (struct xen_page_foreign *)page->private;
273#else
274 BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
275 return (struct xen_page_foreign *)&page->private;
276#endif
277}
278
279
280
281
282
283
284
285
286
/*
 * Callback invoked once per grant chunk: @gfn is the guest frame number,
 * @offset/@len delimit the chunk within that frame, @data is caller context.
 */
typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
			       unsigned int len, void *data);

/* Invoke @fn for every Xen-page-sized chunk of [@offset, @offset + @len) in @page. */
void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data);
295
296
297static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
298 unsigned len, xen_grant_fn_t fn,
299 void *data)
300{
301
302 len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
303 len);
304
305 gnttab_foreach_grant_in_range(page, offset, len, fn, data);
306}
307
308
/* Invoke @fn once per grant backing the @nr_grefs Xen pages in @pages. */
void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data);
313
314
315
316
317
318
/*
 * Number of grants needed to cover @len bytes starting at byte offset
 * @start, rounded up to whole Xen pages.
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
					      unsigned int len)
{
	return XEN_PFN_UP(xen_offset_in_page(start) + len);
}
324
325#endif
326