#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;

/*
 * This is the maximum number of segments that would be allowed in indirect
 * requests. This value will also be passed to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages; the guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment.
 */
#define XEN_PAGES_PER_SEGMENT	(PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME	\
	(XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
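/*
 * Worked example (illustrative, assuming 4KiB pages on both the guest and
 * the backend, so XEN_PAGES_PER_SEGMENT == 1): struct blkif_request_segment
 * occupies 8 bytes with natural alignment, so one indirect frame holds
 * XEN_PAGES_PER_INDIRECT_FRAME = 4096 / 8 = 512 entries and
 * SEGS_PER_INDIRECT_FRAME = 512.  A request using all
 * MAX_INDIRECT_SEGMENTS = 256 segments therefore needs
 * MAX_INDIRECT_PAGES = (256 + 512 - 1) / 512 = 1 indirect page.
 */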

/*
 * Not a real protocol.  Used to generate ring structs which contain only
 * the elements common to all protocols.  This way we get a
 * compiler-checkable way to use common struct elements, so we can avoid
 * using switch(protocol) in a number of places.
 */
struct blkif_common_request {
	char dummy;
};

/* i386 protocol version */

struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, and this value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));

/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, and this value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_response);

union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
# define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
# define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif

struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	unsigned int		flush_support:1;
	unsigned int		discard_secure:1;
	unsigned int		feature_gnt_persistent:1;
	unsigned int		overflow_max_grants:1;
};

struct backend_info;

/* Number of requests that we can fit in a ring. */
#define XEN_BLKIF_REQS_PER_PAGE		32
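/*
 * Sizing note (illustrative): the generic ring macros in
 * <xen/interface/io/ring.h> round the per-page entry count down to a
 * power of two, so with the native blkif request/response union one 4KiB
 * ring page works out to 32 slots, which is where this constant comes from.
 */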

struct persistent_gnt {
	struct page		*page;
	grant_ref_t		gnt;
	grant_handle_t		handle;
	unsigned long		last_used;
	bool			active;
	struct rb_node		node;
	struct list_head	remove_node;
};

/* Per-ring information. */
struct xen_blkif_ring {
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* Private fields. */
	spinlock_t		blk_ring_lock;

	wait_queue_head_t	wq;
	atomic_t		inflight;
	bool			active;
	/* One thread per blkif ring. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* List of all 'pending_req' available. */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;

	/* Tree to store persistent grants. */
	struct rb_root		persistent_gnts;
	unsigned int		persistent_gnt_c;
	atomic_t		persistent_gnt_in_use;
	unsigned long		next_lru;

	/* Statistics. */
	unsigned long		st_print;
	unsigned long long	st_rd_req;
	unsigned long long	st_wr_req;
	unsigned long long	st_oo_req;
	unsigned long long	st_f_req;
	unsigned long long	st_ds_req;
	unsigned long long	st_rd_sect;
	unsigned long long	st_wr_sect;

	/* Used by the kworker that offloads persistent-grant purging. */
	struct list_head	persistent_purge_list;
	struct work_struct	persistent_purge_work;

	/* Buffer of free pages to map grant refs. */
	spinlock_t		free_pages_lock;
	int			free_pages_num;
	struct list_head	free_pages;

	struct work_struct	free_work;
	/* Thread shutdown wait queue. */
	wait_queue_head_t	shutdown_wq;
	struct xen_blkif	*blkif;
};

struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	atomic_t		refcnt;
	/* For barrier (drain) requests. */
	struct completion	drain_complete;
	atomic_t		drain;

	struct work_struct	free_work;
	unsigned int		nr_ring_pages;
	/* All rings for this device. */
	struct xen_blkif_ring	*rings;
	unsigned int		nr_rings;
	unsigned long		buffer_squeeze_end;
};

struct seg_buf {
	unsigned long offset;
	unsigned int nsec;
};

struct grant_page {
	struct page		*page;
	struct persistent_gnt	*persistent_gnt;
	grant_handle_t		handle;
	grant_ref_t		gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif_ring	*ring;
	u64			id;
	int			nr_segs;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
	struct grant_page	*segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page	*indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
	struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
	struct page		*unmap_pages[MAX_INDIRECT_SEGMENTS];
	struct gntab_unmap_queue_data gnttab_unmap_data;
};


#define vbd_sz(_v)	((_v)->bdev->bd_part ? \
			 (_v)->bdev->bd_part->nr_sects : \
			  get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			schedule_work(&(_b)->free_work);\
	} while (0)
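/*
 * Usage sketch (illustrative): code that hands the interface to
 * asynchronous work takes a reference first and drops it when the work
 * completes; the final xen_blkif_put() merely schedules ->free_work, so
 * the last reference can safely be dropped from atomic context.
 *
 *	xen_blkif_get(blkif);
 *	... start async I/O that refers to blkif ...
 *	xen_blkif_put(blkif);	// in the completion path
 */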

struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};

int xen_blkif_interface_init(void);
void xen_blkif_interface_fini(void);

int xen_blkif_xenbus_init(void);
void xen_blkif_xenbus_fini(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op; copy only the id
		 * so a failure response can be returned to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op; copy only the id
		 * so a failure response can be returned to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}
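/*
 * Dispatch sketch (illustrative; the real consumer lives in the blkback
 * request-handling code): the backend copies each request off the shared
 * ring according to the protocol negotiated with the frontend, e.g.:
 *
 *	switch (blkif->blk_protocol) {
 *	case BLKIF_PROTOCOL_NATIVE:
 *		memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
 *		       sizeof(req));
 *		break;
 *	case BLKIF_PROTOCOL_X86_32:
 *		blkif_get_x86_32_req(&req,
 *				RING_GET_REQUEST(&blk_rings->x86_32, rc));
 *		break;
 *	case BLKIF_PROTOCOL_X86_64:
 *		blkif_get_x86_64_req(&req,
 *				RING_GET_REQUEST(&blk_rings->x86_64, rc));
 *		break;
 *	}
 */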

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */