/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 */
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>
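
/*
 * The minimal size of a segment supported by the block framework is
 * PAGE_SIZE. When Linux uses a larger page size than Xen (e.g. 64KB
 * pages with 4KB Xen grants), a single block-layer segment may need
 * more grants than fit in one ring request, so a second "extra"
 * request is chained to the first one.
 */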
#define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct grant {
	grant_ref_t gref;
	struct page *page;
	struct list_head node;
};

enum blk_req_status {
	REQ_WAITING,
	REQ_DONE,
	REQ_ERROR,
	REQ_EOPNOTSUPP,
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
	unsigned int num_sg;
	enum blk_req_status status;

	#define NO_ASSOCIATED_ID ~0UL
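	/*
	 * Id of the sibling shadow entry if a block I/O request ever
	 * needs two ring requests (see HAS_EXTRA_REQ above).
	 */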
	unsigned long associated_id;
};

struct blkif_req {
	blk_status_t error;
};

static inline struct blkif_req *blkif_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
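
/*
 * Maximum number of segments in indirect requests; the value actually
 * used is the minimum of this parameter and the backend's
 * feature-max-indirect-segments. As a (hypothetical) usage example,
 * booting with "xen_blkfront.max_indirect_segments=64" would raise
 * the default cap of 32.
 */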
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
MODULE_PARM_DESC(max_indirect_segments,
		 "Maximum number of segments in indirect requests (default is 32)");

static unsigned int xen_blkif_max_queues = 4;
module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
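
/*
 * Maximum order of pages to be used for the shared ring between the
 * frontend and the backend; the ring is always made of 4KB (Xen) pages.
 */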
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

#define BLK_RING_SIZE(info)	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)

#define BLK_MAX_RING_SIZE	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
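
/*
 * "ring-ref%u" is 8 characters plus at most 10 digits for a 32-bit
 * index, i.e. 19 bytes including the NUL terminator; use 20 to stay
 * consistent with the backend.
 */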
#define RINGREF_NAME_LEN (20)
/* "queue-%u": 6 characters + up to 10 digits (UINT_MAX) + NUL = 17. */
#define QUEUE_NAME_LEN (17)
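
/*
 * Per-ring info.
 * Every blkfront device can associate with one or more blkfront_ring_info,
 * depending on how many hardware queues/rings are used.
 */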
struct blkfront_ring_info {
	/* Lock to protect data in every ring buffer. */
	spinlock_t ring_lock;
	struct blkif_front_ring ring;
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	unsigned int evtchn, irq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_MAX_RING_SIZE];
	struct list_head indirect_pages;
	struct list_head grants;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	struct blkfront_info *dev_info;
};
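
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure.
 */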
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	u16 sector_size;
	unsigned int physical_sector_size;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	/* Number of pages per ring buffer. */
	unsigned int nr_ring_pages;
	struct request_queue *rq;
	unsigned int feature_flush:1;
	unsigned int feature_fua:1;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int feature_persistent:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	/* Number of 4KB segments handled */
	unsigned int max_indirect_segments;
	int is_ready;
	struct blk_mq_tag_set tag_set;
	struct blkfront_ring_info *rinfo;
	unsigned int nr_rings;
	/* Save uncompleted reqs and bios for migration. */
	struct list_head requests;
	struct bio_list bio_list;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

/*
 * Grants are always the same size as a Xen page (i.e 4KB).
 * A physical segment is always the same size as a Linux page.
 * Number of grants per physical segment.
 */
#define GRANTS_PER_PSEG	(PAGE_SIZE / XEN_PAGE_SIZE)

#define GRANTS_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))

#define INDIRECT_GREFS(_grants)		\
	DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)

static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);

static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
	unsigned long free = rinfo->shadow_free;

	BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
	rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
	rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
			      unsigned long id)
{
	if (rinfo->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (rinfo->shadow[id].request == NULL)
		return -EINVAL;
	rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
	rinfo->shadow[id].request = NULL;
	rinfo->shadow_free = id;
	return 0;
}

static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->page = granted_page;
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &rinfo->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &rinfo->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(gnt_list_entry->page);
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}

static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry;

	BUG_ON(list_empty(&rinfo->grants));
	gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		rinfo->persistent_gnts_c--;

	return gnt_list_entry;
}

static inline void grant_foreign_access(const struct grant *gnt_list_entry,
					const struct blkfront_info *info)
{
	gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
						 info->xbdev->otherend_id,
						 gnt_list_entry->page,
						 0);
}

static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long gfn,
			       struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (info->feature_persistent)
		grant_foreign_access(gnt_list_entry, info);
	else {
		/* Grant access to the GFN passed by the caller */
		gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
						info->xbdev->otherend_id,
						gfn, 0);
	}

	return gnt_list_entry;
}

static struct grant *get_indirect_grant(grant_ref_t *gref_head,
					struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		struct page *indirect_page;

		/* Fetch a pre-allocated page to use for indirect grefs */
		BUG_ON(list_empty(&rinfo->indirect_pages));
		indirect_page = list_first_entry(&rinfo->indirect_pages,
						 struct page, lru);
		list_del(&indirect_page->lru);
		gnt_list_entry->page = indirect_page;
	}
	grant_foreign_access(gnt_list_entry, info);

	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
	schedule_work(&rinfo->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
					    struct request *req,
					    struct blkif_request **ring_req)
{
	unsigned long id;

	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
	rinfo->ring.req_prod_pvt++;

	id = get_id_from_freelist(rinfo);
	rinfo->shadow[id].request = req;
	rinfo->shadow[id].status = REQ_WAITING;
	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;

	(*ring_req)->u.rw.id = id;

	return id;
}

static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req;
	unsigned long id;

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);

	ring_req->operation = BLKIF_OP_DISCARD;
	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
	ring_req->u.discard.id = id;
	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
	if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
	else
		ring_req->u.discard.flag = 0;

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;

	return 0;
}

struct setup_rw_req {
	unsigned int grant_idx;
	struct blkif_request_segment *segments;
	struct blkfront_ring_info *rinfo;
	struct blkif_request *ring_req;
	grant_ref_t gref_head;
	unsigned int id;
	/* Only used when persistent grants are in use and a data copy is required. */
	bool need_copy;
	unsigned int bvec_off;
	char *bvec_data;

	bool require_extra_req;
	struct blkif_request *extra_ring_req;
};

static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
				     unsigned int len, void *data)
{
	struct setup_rw_req *setup = data;
	int n, ref;
	struct grant *gnt_list_entry;
	unsigned int fsect, lsect;
	/* Convenient aliases */
	unsigned int grant_idx = setup->grant_idx;
	struct blkif_request *ring_req = setup->ring_req;
	struct blkfront_ring_info *rinfo = setup->rinfo;
	/*
	 * We always use the shadow of the first request to store the list
	 * of grants associated with the block I/O request. This makes the
	 * completion easier to handle even if the block I/O request is
	 * split.
	 */
	struct blk_shadow *shadow = &rinfo->shadow[setup->id];

	if (unlikely(setup->require_extra_req &&
		     grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		/*
		 * We are using the second request, setup grant_idx
		 * to be the index of the segment array.
		 */
		grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
		ring_req = setup->extra_ring_req;
	}

	if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
	    (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
		if (setup->segments)
			kunmap_atomic(setup->segments);

		n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
		gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
		shadow->indirect_grants[n] = gnt_list_entry;
		setup->segments = kmap_atomic(gnt_list_entry->page);
		ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
	}

	gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
	ref = gnt_list_entry->gref;
	/*
	 * All the grants are stored in the shadow of the first request,
	 * therefore we use the global grant index here.
	 */
	shadow->grants_used[setup->grant_idx] = gnt_list_entry;

	if (setup->need_copy) {
		void *shared_data;

		shared_data = kmap_atomic(gnt_list_entry->page);
		/*
		 * Copy the data from the bio vector into the shared page.
		 * Note: this does not wipe data stored outside the range
		 * offset..offset+len in the shared page.
		 */
		memcpy(shared_data + offset,
		       setup->bvec_data + setup->bvec_off,
		       len);

		kunmap_atomic(shared_data);
		setup->bvec_off += len;
	}

	fsect = offset >> 9;
	lsect = fsect + (len >> 9) - 1;
	if (ring_req->operation != BLKIF_OP_INDIRECT) {
		ring_req->u.rw.seg[grant_idx] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	} else {
		setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	}

	(setup->grant_idx)++;
}

static void blkif_setup_extra_req(struct blkif_request *first,
				  struct blkif_request *second)
{
	uint16_t nr_segments = first->u.rw.nr_segments;

	/*
	 * The second request is only present when the first request uses
	 * all its segments. It is always a continuation of the first one.
	 */
	first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
	second->u.rw.sector_number = first->u.rw.sector_number +
		(BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;

	second->u.rw.handle = first->u.rw.handle;
	second->operation = first->operation;
}

static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req, *extra_ring_req = NULL;
	unsigned long id, extra_id = NO_ASSOCIATED_ID;
	bool require_extra_req = false;
	int i;
	struct setup_rw_req setup = {
		.grant_idx = 0,
		.segments = NULL,
		.rinfo = rinfo,
		.need_copy = rq_data_dir(req) && info->feature_persistent,
	};

	/*
	 * Used to store if we are able to queue the request by just using
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	bool new_persistent_gnts = false;
	struct scatterlist *sg;
	int num_sg, max_grefs, num_grant;

	max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(max_grefs);

	/* Check if we have enough persistent grants to allocate a request */
	if (rinfo->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = true;

		if (gnttab_alloc_grant_references(
		    max_grefs - rinfo->persistent_gnts_c,
		    &setup.gref_head) < 0) {
			gnttab_request_free_callback(
				&rinfo->callback,
				blkif_restart_queue_callback,
				rinfo,
				max_grefs - rinfo->persistent_gnts_c);
			return 1;
		}
	}

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);

	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
	num_grant = 0;
	/* Calculate the number of grants used */
	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
		num_grant += gnttab_count_grant(sg->offset, sg->length);

	require_extra_req = info->max_indirect_segments == 0 &&
		num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
	BUG_ON(!HAS_EXTRA_REQ && require_extra_req);

	rinfo->shadow[id].num_sg = num_sg;
	if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
	    likely(!require_extra_req)) {
		/*
		 * The indirect operation can only be a BLKIF_OP_READ or
		 * BLKIF_OP_WRITE
		 */
		BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
		ring_req->operation = BLKIF_OP_INDIRECT;
		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.indirect.handle = info->handle;
		ring_req->u.indirect.nr_segments = num_grant;
	} else {
		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.rw.handle = info->handle;
		ring_req->operation = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
			/*
			 * Ideally we can do an unordered flush-to-disk.
			 * In case the backend only supports barriers, use
			 * that. A barrier request is a superset of FUA, so
			 * we can implement it the same way.  (It's also a
			 * FLUSH+FUA, since it is guaranteed ordered WRT
			 * previous writes.)
			 */
			if (info->feature_flush && info->feature_fua)
				ring_req->operation =
					BLKIF_OP_WRITE_BARRIER;
			else if (info->feature_flush)
				ring_req->operation =
					BLKIF_OP_FLUSH_DISKCACHE;
			else
				ring_req->operation = 0;
		}
		ring_req->u.rw.nr_segments = num_grant;
		if (unlikely(require_extra_req)) {
			extra_id = blkif_ring_get_request(rinfo, req,
							  &extra_ring_req);
			/*
			 * Only the first request contains the scatter-gather
			 * list.
			 */
			rinfo->shadow[extra_id].num_sg = 0;

			blkif_setup_extra_req(ring_req, extra_ring_req);

			/* Link the 2 requests together */
			rinfo->shadow[extra_id].associated_id = id;
			rinfo->shadow[id].associated_id = extra_id;
		}
	}

	setup.ring_req = ring_req;
	setup.id = id;

	setup.require_extra_req = require_extra_req;
	if (unlikely(require_extra_req))
		setup.extra_ring_req = extra_ring_req;

	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
		BUG_ON(sg->offset + sg->length > PAGE_SIZE);

		if (setup.need_copy) {
			setup.bvec_off = sg->offset;
			setup.bvec_data = kmap_atomic(sg_page(sg));
		}

		gnttab_foreach_grant_in_range(sg_page(sg),
					      sg->offset,
					      sg->length,
					      blkif_setup_rw_req_grant,
					      &setup);

		if (setup.need_copy)
			kunmap_atomic(setup.bvec_data);
	}
	if (setup.segments)
		kunmap_atomic(setup.segments);

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;
	if (unlikely(require_extra_req))
		rinfo->shadow[extra_id].req = *extra_ring_req;

	if (new_persistent_gnts)
		gnttab_free_grant_references(setup.gref_head);

	return 0;
}
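
/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */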
static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
{
	if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (unlikely(req_op(req) == REQ_OP_DISCARD ||
		     req_op(req) == REQ_OP_SECURE_ERASE))
		return blkif_queue_discard_req(req, rinfo);
	else
		return blkif_queue_rw_req(req, rinfo);
}

static inline void flush_requests(struct blkfront_ring_info *rinfo)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);

	if (notify)
		notify_remote_via_irq(rinfo->irq);
}

static inline bool blkif_request_flush_invalid(struct request *req,
					       struct blkfront_info *info)
{
	return (blk_rq_is_passthrough(req) ||
		((req_op(req) == REQ_OP_FLUSH) &&
		 !info->feature_flush) ||
		((req->cmd_flags & REQ_FUA) &&
		 !info->feature_fua));
}

static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *qd)
{
	unsigned long flags;
	int qid = hctx->queue_num;
	struct blkfront_info *info = hctx->queue->queuedata;
	struct blkfront_ring_info *rinfo = NULL;

	BUG_ON(info->nr_rings <= qid);
	rinfo = &info->rinfo[qid];
	blk_mq_start_request(qd->rq);
	spin_lock_irqsave(&rinfo->ring_lock, flags);
	if (RING_FULL(&rinfo->ring))
		goto out_busy;

	if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
		goto out_err;

	if (blkif_queue_request(qd->rq, rinfo))
		goto out_busy;

	flush_requests(rinfo);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_STS_OK;

out_err:
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_STS_IOERR;

out_busy:
	blk_mq_stop_hw_queue(hctx);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_STS_DEV_RESOURCE;
}

static void blkif_complete_rq(struct request *rq)
{
	blk_mq_end_request(rq, blkif_req(rq)->error);
}

static const struct blk_mq_ops blkfront_mq_ops = {
	.queue_rq = blkif_queue_rq,
	.complete = blkif_complete_rq,
};

static void blkif_set_queue_limits(struct blkfront_info *info)
{
	struct request_queue *rq = info->rq;
	struct gendisk *gd = info->gd;
	unsigned int segments = info->max_indirect_segments ? :
				BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, info->sector_size);
	blk_queue_physical_block_size(rq, info->physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);
}

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	memset(&info->tag_set, 0, sizeof(info->tag_set));
	info->tag_set.ops = &blkfront_mq_ops;
	info->tag_set.nr_hw_queues = info->nr_rings;
	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
		/*
		 * When indirect descriptors are not supported, the I/O
		 * request may be split between multiple ring requests.
		 * To avoid problems when sending the request, halve the
		 * depth of the queue.
		 */
		info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
	} else
		info->tag_set.queue_depth = BLK_RING_SIZE(info);
	info->tag_set.numa_node = NUMA_NO_NODE;
	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	info->tag_set.cmd_size = sizeof(struct blkif_req);
	info->tag_set.driver_data = info;

	if (blk_mq_alloc_tag_set(&info->tag_set))
		return -EINVAL;
	rq = blk_mq_init_queue(&info->tag_set);
	if (IS_ERR(rq)) {
		blk_mq_free_tag_set(&info->tag_set);
		return PTR_ERR(rq);
	}

	rq->queuedata = info;
	info->rq = gd->queue = rq;
	info->gd = gd;
	info->sector_size = sector_size;
	info->physical_sector_size = physical_sector_size;
	blkif_set_queue_limits(info);

	return 0;
}

static const char *flush_info(struct blkfront_info *info)
{
	if (info->feature_flush && info->feature_fua)
		return "barrier: enabled;";
	else if (info->feature_flush)
		return "flush diskcache: enabled;";
	else
		return "barrier or flush: disabled;";
}

static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
			      info->feature_fua ? true : false);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;
	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
		       "incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}

static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
			       "emulated IDE disks,\n\t choose an xvd device name "
			       "from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors, i;

	if (info->rq == NULL)
		return;

	/* No more blkif_request(). */
	blk_mq_stop_hw_queues(info->rq);

	for (i = 0; i < info->nr_rings; i++) {
		struct blkfront_ring_info *rinfo = &info->rinfo[i];

		/* No more gnttab callback work. */
		gnttab_cancel_free_callback(&rinfo->callback);

		/* Flush gnttab callback work. Must be done with no locks held. */
		flush_work(&rinfo->work);
	}

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	blk_mq_free_tag_set(&info->tag_set);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

/* Already hold rinfo->ring_lock. */
static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
{
	if (!RING_FULL(&rinfo->ring))
		blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
}

static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&rinfo->ring_lock, flags);
	kick_pending_request_queues_locked(rinfo);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);

	if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(rinfo);
}

static void blkif_free_ring(struct blkfront_ring_info *rinfo)
{
	struct grant *persistent_gnt, *n;
	struct blkfront_info *info = rinfo->dev_info;
	int i, j, segs;

	/*
	 * Remove indirect pages, this only happens when using indirect
	 * descriptors but not persistent grants
	 */
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	/* Remove all persistent grants. */
	if (!list_empty(&rinfo->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &rinfo->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
							  0, 0UL);
				rinfo->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
	}
	BUG_ON(rinfo->persistent_gnts_c != 0);

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!rinfo->shadow[i].request)
			goto free_shadow;

		segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
		       rinfo->shadow[i].req.u.indirect.nr_segments :
		       rinfo->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = rinfo->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

		if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = rinfo->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

free_shadow:
		kfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
		kfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&rinfo->callback);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&rinfo->work);

	/* Free resources associated with old device channel. */
	for (i = 0; i < info->nr_ring_pages; i++) {
		if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
			gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
			rinfo->ring_ref[i] = GRANT_INVALID_REF;
		}
	}
	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
	rinfo->ring.sring = NULL;

	if (rinfo->irq)
		unbind_from_irqhandler(rinfo->irq, rinfo);
	rinfo->evtchn = rinfo->irq = 0;
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	unsigned int i;

	/* Prevent new requests being issued until we fix things up. */
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_mq_stop_hw_queues(info->rq);

	for (i = 0; i < info->nr_rings; i++)
		blkif_free_ring(&info->rinfo[i]);

	kfree(info->rinfo);
	info->rinfo = NULL;
	info->nr_rings = 0;
}

struct copy_from_grant {
	const struct blk_shadow *s;
	unsigned int grant_idx;
	unsigned int bvec_offset;
	char *bvec_data;
};

static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct copy_from_grant *info = data;
	char *shared_data;
	/* Convenient alias */
	const struct blk_shadow *s = info->s;

	shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);

	memcpy(info->bvec_data + info->bvec_offset,
	       shared_data + offset, len);

	info->bvec_offset += len;
	info->grant_idx++;

	kunmap_atomic(shared_data);
}

static enum blk_req_status blkif_rsp_to_req_status(int rsp)
{
	switch (rsp) {
	case BLKIF_RSP_OKAY:
		return REQ_DONE;
	case BLKIF_RSP_EOPNOTSUPP:
		return REQ_EOPNOTSUPP;
	case BLKIF_RSP_ERROR:
		/* Fallthrough. */
	default:
		return REQ_ERROR;
	}
}
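
/*
 * Get the final status of the block request based on two ring responses
 */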
static int blkif_get_final_status(enum blk_req_status s1,
				  enum blk_req_status s2)
{
	BUG_ON(s1 == REQ_WAITING);
	BUG_ON(s2 == REQ_WAITING);

	if (s1 == REQ_ERROR || s2 == REQ_ERROR)
		return BLKIF_RSP_ERROR;
	else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
		return BLKIF_RSP_EOPNOTSUPP;
	return BLKIF_RSP_OKAY;
}

static bool blkif_completion(unsigned long *id,
			     struct blkfront_ring_info *rinfo,
			     struct blkif_response *bret)
{
	int i = 0;
	struct scatterlist *sg;
	int num_sg, num_grant;
	struct blkfront_info *info = rinfo->dev_info;
	struct blk_shadow *s = &rinfo->shadow[*id];
	struct copy_from_grant data = {
		.grant_idx = 0,
	};

	num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;

	/* The I/O request is split in two */
	if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
		struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];

		/* Keep the status of the current response in shadow->status */
		s->status = blkif_rsp_to_req_status(bret->status);

		/* Wait for the second response if not yet here */
		if (s2->status == REQ_WAITING)
			return false;

		bret->status = blkif_get_final_status(s->status,
						      s2->status);

		/*
		 * All the grants are stored in the first shadow in order
		 * to make the completion code simple; account for the
		 * grants of the second request here.
		 */
		num_grant += s2->req.u.rw.nr_segments;

		/*
		 * The two responses may not come in order. Only the
		 * first request will store the scatter-gather list.
		 */
		if (s2->num_sg != 0) {
			/* Update "id" with the ID of the first shadow */
			*id = s->associated_id;
			s = s2;
		}

		/*
		 * We don't need the second request anymore, so recycle
		 * it now.
		 */
		if (add_id_to_freelist(rinfo, s->associated_id))
			WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
			     info->gd->disk_name, s->associated_id);
	}

	data.s = s;
	num_sg = s->num_sg;

	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
		for_each_sg(s->sg, sg, num_sg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);

			data.bvec_offset = sg->offset;
			data.bvec_data = kmap_atomic(sg_page(sg));

			gnttab_foreach_grant_in_range(sg_page(sg),
						      sg->offset,
						      sg->length,
						      blkif_copy_from_grant,
						      &data);

			kunmap_atomic(data.bvec_data);
		}
	}
	/* Add the persistent grants back to the list of free grants */
	for (i = 0; i < num_grant; i++) {
		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
			if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
			list_add(&s->grants_used[i]->node, &rinfo->grants);
			rinfo->persistent_gnts_c++;
		} else {
			/*
			 * If the grant is not mapped by the backend we end the
			 * foreign access and add it to the tail of the list,
			 * so it will not be picked again unless we run out of
			 * persistent grants.
			 */
			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
			s->grants_used[i]->gref = GRANT_INVALID_REF;
			list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
		}
	}
	if (s->req.operation == BLKIF_OP_INDIRECT) {
		for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
				if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
				list_add(&s->indirect_grants[i]->node, &rinfo->grants);
				rinfo->persistent_gnts_c++;
			} else {
				struct page *indirect_page;

				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
				/*
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
				 */
				if (!info->feature_persistent) {
					indirect_page = s->indirect_grants[i]->page;
					list_add(&indirect_page->lru, &rinfo->indirect_pages);
				}
				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
				list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
			}
		}
	}

	return true;
}

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
	struct blkfront_info *info = rinfo->dev_info;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&rinfo->ring_lock, flags);
 again:
	rp = rinfo->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&rinfo->ring, i);
		id = bret->id;
		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE -
		 * look in get_id_from_freelist).
		 */
		if (id >= BLK_RING_SIZE(info)) {
			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			/* We can't safely get the 'struct request' as
			 * the id is busted. */
			continue;
		}
		req = rinfo->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD) {
			/*
			 * We may need to wait for an extra response if the
			 * I/O request is split in 2
			 */
			if (!blkif_completion(&id, rinfo, bret))
				continue;
		}

		if (add_id_to_freelist(rinfo, id)) {
			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			continue;
		}

		if (bret->status == BLKIF_RSP_OKAY)
			blkif_req(req)->error = BLK_STS_OK;
		else
			blkif_req(req)->error = BLK_STS_IOERR;

		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				blkif_req(req)->error = BLK_STS_NOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
			}
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				blkif_req(req)->error = BLK_STS_NOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				blkif_req(req)->error = BLK_STS_NOTSUPP;
			}
			if (unlikely(blkif_req(req)->error)) {
				if (blkif_req(req)->error == BLK_STS_NOTSUPP)
					blkif_req(req)->error = BLK_STS_OK;
				info->feature_fua = 0;
				info->feature_flush = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			break;
		default:
			BUG();
		}

		blk_mq_complete_request(req);
	}

	rinfo->ring.rsp_cons = i;

	if (i != rinfo->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		rinfo->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues_locked(rinfo);

	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

	return IRQ_HANDLED;
}

static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_ring_info *rinfo)
{
	struct blkif_sring *sring;
	int err, i;
	struct blkfront_info *info = rinfo->dev_info;
	unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
	grant_ref_t gref[XENBUS_MAX_RING_GRANTS];

	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
						       get_order(ring_size));
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&rinfo->ring, sring, ring_size);

	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
	if (err < 0) {
		free_pages((unsigned long)sring, get_order(ring_size));
		rinfo->ring.sring = NULL;
		goto fail;
	}
	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = gref[i];

	err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
					"blkif", rinfo);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	rinfo->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}
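
/*
 * Write the grant references and the event channel of the given ring
 * into one xenstore directory (the device node itself for a single
 * ring, or a per-queue subdirectory otherwise).
 */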
static int write_per_ring_nodes(struct xenbus_transaction xbt,
				struct blkfront_ring_info *rinfo, const char *dir)
{
	int err;
	unsigned int i;
	const char *message = NULL;
	struct blkfront_info *info = rinfo->dev_info;

	if (info->nr_ring_pages == 1) {
		err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
		if (err) {
			message = "writing ring-ref";
			goto abort_transaction;
		}
	} else {
		for (i = 0; i < info->nr_ring_pages; i++) {
			char ring_ref_name[RINGREF_NAME_LEN];

			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
			err = xenbus_printf(xbt, dir, ring_ref_name,
					    "%u", rinfo->ring_ref[i]);
			if (err) {
				message = "writing ring-ref";
				goto abort_transaction;
			}
		}
	}

	err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(info->xbdev, err, "%s", message);

	return err;
}
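
/* Common code used when first setting up, and when resuming. */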
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;
	unsigned int i, max_page_order;
	unsigned int ring_page_order;

	if (!info)
		return -ENODEV;

	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
					      "max-ring-page-order", 0);
	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
	info->nr_ring_pages = 1 << ring_page_order;

	err = negotiate_mq(info);
	if (err)
		goto destroy_blkring;

	for (i = 0; i < info->nr_rings; i++) {
		struct blkfront_ring_info *rinfo = &info->rinfo[i];

		/* Create shared ring, alloc event channel. */
		err = setup_blkring(dev, rinfo);
		if (err)
			goto destroy_blkring;
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	if (info->nr_ring_pages > 1) {
		err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
				    ring_page_order);
		if (err) {
			message = "writing ring-page-order";
			goto abort_transaction;
		}
	}

	/* We already got the number of queues/rings in _probe */
	if (info->nr_rings == 1) {
		err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
		if (err)
			goto destroy_blkring;
	} else {
		char *path;
		size_t pathsize;

		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
				    info->nr_rings);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction;
		}

		pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
		path = kmalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "ENOMEM while writing ring references";
			goto abort_transaction;
		}

		for (i = 0; i < info->nr_rings; i++) {
			memset(path, 0, pathsize);
			snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
			err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
			if (err) {
				kfree(path);
				goto destroy_blkring;
			}
		}
		kfree(path);
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "feature-persistent", "%u", 1);
	if (err)
		dev_warn(&dev->dev,
			 "writing persistent grants feature to xenbus");

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	for (i = 0; i < info->nr_rings; i++) {
		unsigned int j;
		struct blkfront_ring_info *rinfo = &info->rinfo[i];

		for (j = 0; j < BLK_RING_SIZE(info); j++)
			rinfo->shadow[j].req.u.rw.id = j + 1;
		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
	}
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);

	kfree(info);
	dev_set_drvdata(&dev->dev, NULL);

	return err;
}

static int negotiate_mq(struct blkfront_info *info)
{
	unsigned int backend_max_queues;
	unsigned int i;

	BUG_ON(info->nr_rings);

	/* Check if backend supports multiple queues. */
	backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
						  "multi-queue-max-queues", 1);
	info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
	/* We need at least one ring. */
	if (!info->nr_rings)
		info->nr_rings = 1;

	info->rinfo = kcalloc(info->nr_rings,
			      sizeof(struct blkfront_ring_info),
			      GFP_KERNEL);
	if (!info->rinfo) {
		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
		return -ENOMEM;
	}

	for (i = 0; i < info->nr_rings; i++) {
		struct blkfront_ring_info *rinfo;

		rinfo = &info->rinfo[i];
		INIT_LIST_HEAD(&rinfo->indirect_pages);
		INIT_LIST_HEAD(&rinfo->grants);
		rinfo->dev_info = info;
		INIT_WORK(&rinfo->work, blkif_restart_queue);
		spin_lock_init(&rinfo->ring_lock);
	}
	return 0;
}
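
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */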
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_has_pv_and_legacy_disk_devices()) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
				       "%s: HVM does not support vbd %d as xen block device\n",
				       __func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	info->xbdev = dev;

	mutex_init(&info->mutex);
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	return 0;
}

static int blkif_recover(struct blkfront_info *info)
{
	unsigned int r_index;
	struct request *req, *n;
	int rc;
	struct bio *bio;
	unsigned int segs;

	blkfront_gather_backend_features(info);
	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
	blkif_set_queue_limits(info);
	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);

	for (r_index = 0; r_index < info->nr_rings; r_index++) {
		struct blkfront_ring_info *rinfo = &info->rinfo[r_index];

		rc = blkfront_setup_indirect(rinfo);
		if (rc)
			return rc;
	}
	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	for (r_index = 0; r_index < info->nr_rings; r_index++) {
		struct blkfront_ring_info *rinfo;

		rinfo = &info->rinfo[r_index];
		/* Kick any other new requests queued since we resumed */
		kick_pending_request_queues(rinfo);
	}

	list_for_each_entry_safe(req, n, &info->requests, queuelist) {
		/* Requeue pending requests (flush or discard) */
		list_del_init(&req->queuelist);
		BUG_ON(req->nr_phys_segments > segs);
		blk_mq_requeue_request(req, false);
	}
	blk_mq_start_stopped_hw_queues(info->rq, true);
	blk_mq_kick_requeue_list(info->rq);

	while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
		/* Traverse the list of pending bios and re-queue them */
		submit_bio(bio);
	}

	return 0;
}
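
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * going away.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */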
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err = 0;
	unsigned int i, j;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	bio_list_init(&info->bio_list);
	INIT_LIST_HEAD(&info->requests);
	for (i = 0; i < info->nr_rings; i++) {
		struct blkfront_ring_info *rinfo = &info->rinfo[i];
		struct bio_list merge_bio;
		struct blk_shadow *shadow = rinfo->shadow;

		for (j = 0; j < BLK_RING_SIZE(info); j++) {
			/* Not in use? */
			if (!shadow[j].request)
				continue;

			/*
			 * Get the bios in the request so we can re-queue them.
			 */
			if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
			    req_op(shadow[j].request) == REQ_OP_DISCARD ||
			    req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
			    shadow[j].request->cmd_flags & REQ_FUA) {
				/*
				 * Flush operations don't contain bios, so
				 * we need to requeue the whole request
				 *
				 * XXX: but this doesn't make any sense for a
				 * write with the FUA flag set..
				 */
				list_add(&shadow[j].request->queuelist, &info->requests);
				continue;
			}
			merge_bio.head = shadow[j].request->bio;
			merge_bio.tail = shadow[j].request->biotail;
			bio_list_merge(&info->bio_list, &merge_bio);
			shadow[j].request->bio = NULL;
			blk_mq_end_request(shadow[j].request, BLK_STS_OK);
		}
	}

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);
	if (!err)
		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

	/*
	 * We have to wait for the backend to switch to
	 * connected state, since we want to read which
	 * features it supports.
	 */

	return err;
}

static void blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}

static void blkfront_setup_discard(struct blkfront_info *info)
{
	int err;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	info->feature_discard = 1;
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "discard-granularity", "%u", &discard_granularity,
			    "discard-alignment", "%u", &discard_alignment,
			    NULL);
	if (!err) {
		info->discard_granularity = discard_granularity;
		info->discard_alignment = discard_alignment;
	}
	info->feature_secdiscard =
		!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
				       0);
}

static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{
	unsigned int psegs, grants;
	int err, i;
	struct blkfront_info *info = rinfo->dev_info;

	if (info->max_indirect_segments == 0) {
		if (!HAS_EXTRA_REQ)
			grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
		else {
			/*
			 * When an extra req is used, the maximum
			 * grants supported is related to the size of the
			 * Linux block segment.
			 */
			grants = GRANTS_PER_PSEG;
		}
	} else
		grants = info->max_indirect_segments;
	psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);

	err = fill_grant_buffer(rinfo,
				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
	if (err)
		goto out_of_memory;

	if (!info->feature_persistent && info->max_indirect_segments) {
		/*
		 * We are using indirect descriptors but not persistent
		 * grants, we need to allocate a set of pages that can be
		 * used for mapping indirect grefs
		 */
		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);

		BUG_ON(!list_empty(&rinfo->indirect_pages));
		for (i = 0; i < num; i++) {
			struct page *indirect_page = alloc_page(GFP_NOIO);
			if (!indirect_page)
				goto out_of_memory;
			list_add(&indirect_page->lru, &rinfo->indirect_pages);
		}
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		rinfo->shadow[i].grants_used =
			kcalloc(grants,
				sizeof(rinfo->shadow[i].grants_used[0]),
				GFP_NOIO);
		rinfo->shadow[i].sg = kcalloc(psegs,
					      sizeof(rinfo->shadow[i].sg[0]),
					      GFP_NOIO);
		if (info->max_indirect_segments)
			rinfo->shadow[i].indirect_grants =
				kcalloc(INDIRECT_GREFS(grants),
					sizeof(rinfo->shadow[i].indirect_grants[0]),
					GFP_NOIO);
		if ((rinfo->shadow[i].grants_used == NULL) ||
		    (rinfo->shadow[i].sg == NULL) ||
		    (info->max_indirect_segments &&
		     (rinfo->shadow[i].indirect_grants == NULL)))
			goto out_of_memory;
		sg_init_table(rinfo->shadow[i].sg, psegs);
	}

	return 0;

out_of_memory:
	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		kfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
		kfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
	}
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}
	return -ENOMEM;
}
2257
/*
 * Gather all backend feature-* entries advertised in XenStore.
 */
2261static void blkfront_gather_backend_features(struct blkfront_info *info)
2262{
2263 unsigned int indirect_segments;
2264
2265 info->feature_flush = 0;
2266 info->feature_fua = 0;
2267
	/*
	 * If there is no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
2275 if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
2276 info->feature_flush = 1;
2277 info->feature_fua = 1;
2278 }
2279
	/*
	 * And if there is "feature-flush-cache" use that above
	 * barriers.
	 */
2284 if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
2285 0)) {
2286 info->feature_flush = 1;
2287 info->feature_fua = 0;
2288 }
2289
2290 if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
2291 blkfront_setup_discard(info);
2292
2293 info->feature_persistent =
2294 !!xenbus_read_unsigned(info->xbdev->otherend,
2295 "feature-persistent", 0);
2296
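	/*
	 * Clamp the backend's advertised limit to the module parameter; a
	 * value that already fits in a plain request means indirect
	 * descriptors buy us nothing, so leave them disabled.
	 */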
2297 indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
2298 "feature-max-indirect-segments", 0);
2299 if (indirect_segments > xen_blkif_max_segments)
2300 indirect_segments = xen_blkif_max_segments;
2301 if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
2302 indirect_segments = 0;
2303 info->max_indirect_segments = indirect_segments;
2304}
2305
/*
 * Invoked when the backend is finally 'ready' (and has provided
 * the details about the physical device - #sectors, size, etc).
 */
2310static void blkfront_connect(struct blkfront_info *info)
2311{
2312 unsigned long long sectors;
2313 unsigned long sector_size;
2314 unsigned int physical_sector_size;
2315 unsigned int binfo;
2316 char *envp[] = { "RESIZE=1", NULL };
2317 int err, i;
2318
2319 switch (info->connected) {
2320 case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
2327 if (XENBUS_EXIST_ERR(err))
2328 return;
2329 printk(KERN_INFO "Setting capacity to %Lu\n",
2330 sectors);
2331 set_capacity(info->gd, sectors);
2332 revalidate_disk(info->gd);
2333 kobject_uevent_env(&disk_to_dev(info->gd)->kobj,
2334 KOBJ_CHANGE, envp);
2335
2336 return;
2337 case BLKIF_STATE_SUSPENDED:
		/*
		 * If we are recovering from suspension, we need to wait
		 * for the backend to announce its features before
		 * reconnecting, at least we need to know if the backend
		 * supports indirect descriptors, and how many.
		 */
2344 blkif_recover(info);
2345 return;
2346
2347 default:
2348 break;
2349 }
2350
2351 dev_dbg(&info->xbdev->dev, "%s:%s.\n",
2352 __func__, info->xbdev->otherend);
2353
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
2359 if (err) {
2360 xenbus_dev_fatal(info->xbdev, err,
2361 "reading backend fields at %s",
2362 info->xbdev->otherend);
2363 return;
2364 }
2365
	/*
	 * physical-sector-size is a newer field, so old backends may not
	 * provide this. Assume physical sector size to be the same as
	 * sector_size in that case.
	 */
2371 physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
2372 "physical-sector-size",
2373 sector_size);
2374 blkfront_gather_backend_features(info);
2375 for (i = 0; i < info->nr_rings; i++) {
2376 err = blkfront_setup_indirect(&info->rinfo[i]);
2377 if (err) {
2378 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
2379 info->xbdev->otherend);
2380 blkif_free(info, 0);
2381 break;
2382 }
2383 }
2384
2385 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
2386 physical_sector_size);
2387 if (err) {
2388 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
2389 info->xbdev->otherend);
2390 goto fail;
2391 }
2392
2393 xenbus_switch_state(info->xbdev, XenbusStateConnected);
2394
	/* Kick pending requests. */
2396 info->connected = BLKIF_STATE_CONNECTED;
2397 for (i = 0; i < info->nr_rings; i++)
2398 kick_pending_request_queues(&info->rinfo[i]);
2399
2400 device_add_disk(&info->xbdev->dev, info->gd, NULL);
2401
2402 info->is_ready = 1;
2403 return;
2404
2405fail:
2406 blkif_free(info, 0);
2407 return;
2408}
2409
/*
 * Callback received when the backend's state changes.
 */
2413static void blkback_changed(struct xenbus_device *dev,
2414 enum xenbus_state backend_state)
2415{
2416 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2417
2418 dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
2419
2420 switch (backend_state) {
2421 case XenbusStateInitWait:
2422 if (dev->state != XenbusStateInitialising)
2423 break;
2424 if (talk_to_blkback(dev, info))
2425 break;
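		/* fall through */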
2426 case XenbusStateInitialising:
2427 case XenbusStateInitialised:
2428 case XenbusStateReconfiguring:
2429 case XenbusStateReconfigured:
2430 case XenbusStateUnknown:
2431 break;
2432
2433 case XenbusStateConnected:
		/*
		 * talk_to_blkback sets state to XenbusStateInitialised
		 * and blkfront_connect sets it to XenbusStateConnected
		 * (if connection went OK).
		 *
		 * If the backend (or toolstack) decides to poke at backend
		 * state (and re-trigger the watch by setting the state repeatedly
		 * to XenbusStateConnected (4)) we need to deal with this.
		 * This is allowed as this is used to communicate to the guest
		 * that the size of disk has changed!
		 */
2445 if ((dev->state != XenbusStateInitialised) &&
2446 (dev->state != XenbusStateConnected)) {
2447 if (talk_to_blkback(dev, info))
2448 break;
2449 }
2450
2451 blkfront_connect(info);
2452 break;
2453
2454 case XenbusStateClosed:
2455 if (dev->state == XenbusStateClosed)
2456 break;
		/* Missed the backend's Closing state -- fallthrough */
2458 case XenbusStateClosing:
2459 if (info)
2460 blkfront_closing(info);
2461 break;
2462 }
2463}
2464
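/*
 * The xenbus device is going away: free the ring resources and, unless
 * the disk is still held open, the gendisk and private data as well.
 */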
2465static int blkfront_remove(struct xenbus_device *xbdev)
2466{
2467 struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
2468 struct block_device *bdev = NULL;
2469 struct gendisk *disk;
2470
2471 dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
2472
2473 blkif_free(info, 0);
2474
2475 mutex_lock(&info->mutex);
2476
2477 disk = info->gd;
2478 if (disk)
2479 bdev = bdget_disk(disk, 0);
2480
2481 info->xbdev = NULL;
2482 mutex_unlock(&info->mutex);
2483
2484 if (!bdev) {
2485 kfree(info);
2486 return 0;
2487 }
2488
	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */
2495 mutex_lock(&bdev->bd_mutex);
2496 info = disk->private_data;
2497
2498 dev_warn(disk_to_dev(disk),
2499 "%s was hot-unplugged, %d stale handles\n",
2500 xbdev->nodename, bdev->bd_openers);
2501
2502 if (info && !bdev->bd_openers) {
2503 xlvbd_release_gendisk(info);
2504 disk->private_data = NULL;
2505 kfree(info);
2506 }
2507
2508 mutex_unlock(&bdev->bd_mutex);
2509 bdput(bdev);
2510
2511 return 0;
2512}
2513
2514static int blkfront_is_ready(struct xenbus_device *dev)
2515{
2516 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2517
2518 return info->is_ready && info->xbdev;
2519}
2520
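/* Refuse opens that race with hot-unplug or a completed close. */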
2521static int blkif_open(struct block_device *bdev, fmode_t mode)
2522{
2523 struct gendisk *disk = bdev->bd_disk;
2524 struct blkfront_info *info;
2525 int err = 0;
2526
2527 mutex_lock(&blkfront_mutex);
2528
2529 info = disk->private_data;
2530 if (!info) {
		/* xbdev gone */
2532 err = -ERESTARTSYS;
2533 goto out;
2534 }
2535
2536 mutex_lock(&info->mutex);
2537
2538 if (!info->gd)
		/* xbdev is closed */
2540 err = -ERESTARTSYS;
2541
2542 mutex_unlock(&info->mutex);
2543
2544out:
2545 mutex_unlock(&blkfront_mutex);
2546 return err;
2547}
2548
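/*
 * On the last close, finish a close the backend requested while the
 * device was busy, or complete the cleanup after a sudden removal.
 */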
2549static void blkif_release(struct gendisk *disk, fmode_t mode)
2550{
2551 struct blkfront_info *info = disk->private_data;
2552 struct block_device *bdev;
2553 struct xenbus_device *xbdev;
2554
2555 mutex_lock(&blkfront_mutex);
2556
2557 bdev = bdget_disk(disk, 0);
2558
2559 if (!bdev) {
2560 WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
2561 goto out_mutex;
2562 }
2563 if (bdev->bd_openers)
2564 goto out;
2565
	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */
2571 mutex_lock(&info->mutex);
2572 xbdev = info->xbdev;
2573
2574 if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
2576 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
2577 xlvbd_release_gendisk(info);
2578 xenbus_frontend_closed(info->xbdev);
2579 }
2580
2581 mutex_unlock(&info->mutex);
2582
2583 if (!xbdev) {
		/* sudden device removal */
2585 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
2586 xlvbd_release_gendisk(info);
2587 disk->private_data = NULL;
2588 kfree(info);
2589 }
2590
2591out:
2592 bdput(bdev);
2593out_mutex:
2594 mutex_unlock(&blkfront_mutex);
2595}
2596
static const struct block_device_operations xlvbd_block_fops = {
2599 .owner = THIS_MODULE,
2600 .open = blkif_open,
2601 .release = blkif_release,
2602 .getgeo = blkif_getgeo,
2603 .ioctl = blkif_ioctl,
2604};
2605
2606
2607static const struct xenbus_device_id blkfront_ids[] = {
2608 { "vbd" },
2609 { "" }
2610};
2611
2612static struct xenbus_driver blkfront_driver = {
2613 .ids = blkfront_ids,
2614 .probe = blkfront_probe,
2615 .remove = blkfront_remove,
2616 .resume = blkfront_resume,
2617 .otherend_changed = blkback_changed,
2618 .is_ready = blkfront_is_ready,
2619};
2620
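/* Validate the module parameters and register the xenbus frontend. */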
2621static int __init xlblk_init(void)
2622{
2623 int ret;
2624 int nr_cpus = num_online_cpus();
2625
2626 if (!xen_domain())
2627 return -ENODEV;
2628
2629 if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
2630 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2631
2632 if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%u), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
2635 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
2636 }
2637
2638 if (xen_blkif_max_queues > nr_cpus) {
		pr_info("Invalid max_queues (%u), will use default max: %d.\n",
			xen_blkif_max_queues, nr_cpus);
2641 xen_blkif_max_queues = nr_cpus;
2642 }
2643
2644 if (!xen_has_pv_disk_devices())
2645 return -ENODEV;
2646
2647 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2648 printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
2649 XENVBD_MAJOR, DEV_NAME);
2650 return -ENODEV;
2651 }
2652
2653 ret = xenbus_register_frontend(&blkfront_driver);
2654 if (ret) {
2655 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2656 return ret;
2657 }
2658
2659 return 0;
2660}
2661module_init(xlblk_init);
2662
2663
2664static void __exit xlblk_exit(void)
2665{
2666 xenbus_unregister_driver(&blkfront_driver);
2667 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2668 kfree(minors);
2669}
2670module_exit(xlblk_exit);
2671
2672MODULE_DESCRIPTION("Xen virtual block device frontend");
2673MODULE_LICENSE("GPL");
2674MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
2675MODULE_ALIAS("xen:vbd");
2676MODULE_ALIAS("xenblk");
2677