#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#define DRV_PFX "xen-blkback:"
#define DPRINTK(fmt, args...)				\
	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\
		 __func__, __LINE__, ##args)
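
/*
 * Illustrative example (not from the original source):
 *	DPRINTK("bad operation %d", op);
 * expands to a pr_debug() message prefixed with DRV_PFX plus the
 * current function name and line number.
 */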

/*
 * This is the maximum number of segments that would be allowed in indirect
 * requests. This value will also be passed to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
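
/*
 * Worked example (assuming 4 KiB pages and the 8-byte
 * struct blkif_request_segment used by this ABI):
 * SEGS_PER_INDIRECT_FRAME = 4096 / 8 = 512, so the 256 segments of
 * MAX_INDIRECT_SEGMENTS fit in a single indirect page:
 * MAX_INDIRECT_PAGES = (256 + 511) / 512 = 1.
 */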

/* Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.  */
struct blkif_common_request {
	char dummy;
};
struct blkif_common_response {
	char dummy;
};

struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;   /* number of contiguous sectors         */
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
	 * is also exported to the guest (via xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));

/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
	uint64_t        id;              /* copied from request */
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};
#pragma pack(pop)

/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
	uint64_t       id;
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
	 * is also exported to the guest (via xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));

struct blkif_x86_64_response {
	uint64_t       __attribute__((__aligned__(8))) id;
	uint8_t        operation;       /* copied from request */
	int16_t        status;          /* BLKIF_RSP_???       */
};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_x86_64_response);

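/*
 * One backend ring state per supported ABI; only the member matching
 * the negotiated blk_protocol is used for a given connection.
 */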
union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};

struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	unsigned int		flush_support:1;
	unsigned int		discard_secure:1;
	unsigned int		feature_gnt_persistent:1;
	unsigned int		overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE	2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE		0
/*
 * This persistent grant has been used, this flag is set when we remove the
 * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
 */
#define PERSISTENT_GNT_WAS_ACTIVE	1

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS			32
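
/*
 * Descriptive note (inferred from field usage, not original wording):
 * node links a persistent grant into the blkif's persistent_gnts
 * rb-tree, keyed by gnt; remove_node queues it on persistent_purge_list
 * when the LRU decides to unmap it.
 */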
struct persistent_gnt {
	struct page		*page;
	grant_ref_t		gnt;
	grant_handle_t		handle;
	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
	struct rb_node		node;
	struct list_head	remove_node;
};

struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	/* Private fields. */
	spinlock_t		blk_ring_lock;
	atomic_t		refcnt;

	wait_queue_head_t	wq;
	/* for barrier (drain) requests */
	struct completion	drain_complete;
	atomic_t		drain;
	atomic_t		inflight;
	/* One thread per one blkif. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* tree to store persistent grants */
	struct rb_root		persistent_gnts;
	unsigned int		persistent_gnt_c;
	atomic_t		persistent_gnt_in_use;
	unsigned long		next_lru;

	/* used by the kworker that offloads work from the persistent purge */
	struct list_head	persistent_purge_list;
	struct work_struct	persistent_purge_work;

	/* buffer of free pages to map grant refs */
	spinlock_t		free_pages_lock;
	int			free_pages_num;
	struct list_head	free_pages;

	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;

	/* statistics */
	unsigned long		st_print;
	unsigned long long	st_rd_req;
	unsigned long long	st_wr_req;
	unsigned long long	st_oo_req;
	unsigned long long	st_f_req;
	unsigned long long	st_ds_req;
	unsigned long long	st_rd_sect;
	unsigned long long	st_wr_sect;

	struct work_struct	free_work;
	/* Thread shutdown wait queue. */
	wait_queue_head_t	shutdown_wq;
};

struct seg_buf {
	unsigned long offset;
	unsigned int nsec;
};
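
/*
 * Descriptive note (inferred, not original wording): per-segment grant
 * bookkeeping. When persistent grants are in use, persistent_gnt points
 * at the cached mapping and page/handle are taken from it; otherwise the
 * grant is mapped and unmapped around each request.
 */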
struct grant_page {
	struct page		*page;
	struct persistent_gnt	*persistent_gnt;
	grant_handle_t		handle;
	grant_ref_t		gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * record of the response generated.
 */
struct pending_req {
	struct xen_blkif	*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
	struct grant_page	*segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page	*indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
};

#define vbd_sz(_v)	((_v)->bdev->bd_part ? \
			 (_v)->bdev->bd_part->nr_sects : \
			 get_capacity((_v)->bdev->bd_disk))
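
/*
 * Note: vbd_sz() is in units of 512-byte sectors, matching both
 * bd_part->nr_sects and get_capacity().
 */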

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			schedule_work(&(_b)->free_work);\
	} while (0)
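
/*
 * Illustrative usage (a sketch, not mandated by this header): take a
 * reference with xen_blkif_get(blkif) before handing the interface to
 * another context, and drop it with xen_blkif_put(blkif). The final put
 * defers destruction to free_work rather than freeing inline, so it is
 * safe to call from atomic context.
 */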

struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};
int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif *blkif);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);
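
/*
 * The helpers below copy a request out of the shared (guest-writable)
 * ring into a private, native-layout blkif_request. The segment counts
 * are re-read from the private copy and clamped after barrier(), so a
 * frontend rewriting the shared entry mid-copy cannot make the loops
 * index past the end of seg[] or indirect_grefs[].
 */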
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = src->operation;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = src->operation;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */