/*
 * blkfront.c: Xen virtual block device frontend driver.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/sched/mm.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

/*
 * The minimal segment size supported by the block framework is PAGE_SIZE.
 * When Linux uses a larger page size than Xen, a single non-indirect request
 * can only carry BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE bytes, so a
 * request covering one Linux page may have to be split across one extra ring
 * request.  HAS_EXTRA_REQ is true when such a split can be needed.
 */
#define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
78
79enum blkif_state {
80 BLKIF_STATE_DISCONNECTED,
81 BLKIF_STATE_CONNECTED,
82 BLKIF_STATE_SUSPENDED,
83};
84
85struct grant {
86 grant_ref_t gref;
87 struct page *page;
88 struct list_head node;
89};
90
91enum blk_req_status {
92 REQ_WAITING,
93 REQ_DONE,
94 REQ_ERROR,
95 REQ_EOPNOTSUPP,
96};
97
98struct blk_shadow {
99 struct blkif_request req;
100 struct request *request;
101 struct grant **grants_used;
102 struct grant **indirect_grants;
103 struct scatterlist *sg;
104 unsigned int num_sg;
105 enum blk_req_status status;

	#define NO_ASSOCIATED_ID ~0UL
	/*
	 * Id of the sibling shadow entry if a single block layer request had
	 * to be split across two ring requests, NO_ASSOCIATED_ID otherwise.
	 */
	unsigned long associated_id;
113};
114
115struct blkif_req {
116 blk_status_t error;
117};
118
119static inline struct blkif_req *blkif_req(struct request *rq)
120{
121 return blk_mq_rq_to_pdu(rq);
122}
123
124static DEFINE_MUTEX(blkfront_mutex);
125static const struct block_device_operations xlvbd_block_fops;
126static struct delayed_work blkfront_work;
127static LIST_HEAD(info_list);

/*
 * Maximum number of segments in indirect requests; the value actually used
 * is the minimum of this module parameter and the value advertised by the
 * backend.
 */
static unsigned int xen_blkif_max_segments = 32;
136module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
137MODULE_PARM_DESC(max_indirect_segments,
138 "Maximum amount of segments in indirect requests (default is 32)");
139
140static unsigned int xen_blkif_max_queues = 4;
141module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
142MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
static unsigned int xen_blkif_max_ring_order;
149module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
150MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
151
152#define BLK_RING_SIZE(info) \
153 __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)

/*
 * "ring-ref%u" with a 32-bit index needs at most 8 + 10 + 1 characters;
 * use 20 to stay consistent with the backend.
 */
#define RINGREF_NAME_LEN (20)

/* "queue-%u" with a 32-bit index needs at most 6 + 10 + 1 = 17 characters. */
#define QUEUE_NAME_LEN (17)

/*
 * Per-ring info.
 * Every blkfront device can associate with one or more blkfront_ring_info,
 * depending on how many hardware queues/rings are used.
 */
struct blkfront_ring_info {
	/* Lock to protect data in every ring buffer. */
172 spinlock_t ring_lock;
173 struct blkif_front_ring ring;
174 unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
175 unsigned int evtchn, irq;
176 struct work_struct work;
177 struct gnttab_free_callback callback;
178 struct list_head indirect_pages;
179 struct list_head grants;
180 unsigned int persistent_gnts_c;
181 unsigned long shadow_free;
182 struct blkfront_info *dev_info;
183 struct blk_shadow shadow[];
184};

/*
 * Per virtual block device (vbd) state.  A pointer to this structure hangs
 * off the gendisk's private_data.
 */
struct blkfront_info
{
193 struct mutex mutex;
194 struct xenbus_device *xbdev;
195 struct gendisk *gd;
196 u16 sector_size;
197 unsigned int physical_sector_size;
198 int vdevice;
199 blkif_vdev_t handle;
200 enum blkif_state connected;
	/* Number of pages per ring buffer. */
	unsigned int nr_ring_pages;
203 struct request_queue *rq;
204 unsigned int feature_flush:1;
205 unsigned int feature_fua:1;
206 unsigned int feature_discard:1;
207 unsigned int feature_secdiscard:1;
208 unsigned int feature_persistent:1;
209 unsigned int discard_granularity;
210 unsigned int discard_alignment;
	/* Number of indirect segments per request, 0 when not supported. */
	unsigned int max_indirect_segments;
213 int is_ready;
214 struct blk_mq_tag_set tag_set;
215 struct blkfront_ring_info *rinfo;
216 unsigned int nr_rings;
217 unsigned int rinfo_size;
	/* Save uncompleted requests and bios for migration. */
	struct list_head requests;
220 struct bio_list bio_list;
221 struct list_head info_list;
222};
223
224static unsigned int nr_minors;
225static unsigned long *minors;
226static DEFINE_SPINLOCK(minor_lock);
227
228#define GRANT_INVALID_REF 0
229
230#define PARTS_PER_DISK 16
231#define PARTS_PER_EXT_DISK 256
232
233#define BLKIF_MAJOR(dev) ((dev)>>8)
234#define BLKIF_MINOR(dev) ((dev) & 0xff)
235
236#define EXT_SHIFT 28
237#define EXTENDED (1<<EXT_SHIFT)
238#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
239#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
240#define EMULATED_HD_DISK_MINOR_OFFSET (0)
241#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
242#define EMULATED_SD_DISK_MINOR_OFFSET (0)
243#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
244
245#define DEV_NAME "xvd"

/*
 * Grants are always the same size as a Xen page (i.e. 4KB).
 * A physical segment is always the same size as a Linux page.
 * Number of grants per physical segment.
 */
#define GRANTS_PER_PSEG	(PAGE_SIZE / XEN_PAGE_SIZE)
253
254#define GRANTS_PER_INDIRECT_FRAME \
255 (XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
256
257#define INDIRECT_GREFS(_grants) \
258 DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
259
260static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
261static void blkfront_gather_backend_features(struct blkfront_info *info);
262static int negotiate_mq(struct blkfront_info *info);
263
264#define for_each_rinfo(info, ptr, idx) \
265 for ((ptr) = (info)->rinfo, (idx) = 0; \
266 (idx) < (info)->nr_rings; \
267 (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
268
269static inline struct blkfront_ring_info *
270get_rinfo(const struct blkfront_info *info, unsigned int i)
271{
272 BUG_ON(i >= info->nr_rings);
273 return (void *)info->rinfo + i * info->rinfo_size;
274}
275
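/*
 * Pop a free shadow-entry id off the ring's free list.  The free list is
 * threaded through the req.u.rw.id fields of the unused shadow entries and
 * is only touched with the ring lock held.
 */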
276static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
277{
278 unsigned long free = rinfo->shadow_free;
279
280 BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
281 rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
282 rinfo->shadow[free].req.u.rw.id = 0x0fffffee;
283 return free;
284}
285
286static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
287 unsigned long id)
288{
289 if (rinfo->shadow[id].req.u.rw.id != id)
290 return -EINVAL;
291 if (rinfo->shadow[id].request == NULL)
292 return -EINVAL;
293 rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
294 rinfo->shadow[id].request = NULL;
295 rinfo->shadow_free = id;
296 return 0;
297}
298
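/*
 * Pre-allocate 'num' grant entries for a ring and add them to its free list.
 * When persistent grants are used, a backing page is allocated for each
 * entry as well; on failure everything allocated here is freed again.
 */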
299static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
300{
301 struct blkfront_info *info = rinfo->dev_info;
302 struct page *granted_page;
303 struct grant *gnt_list_entry, *n;
304 int i = 0;
305
306 while (i < num) {
307 gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
308 if (!gnt_list_entry)
309 goto out_of_memory;
310
311 if (info->feature_persistent) {
312 granted_page = alloc_page(GFP_NOIO);
313 if (!granted_page) {
314 kfree(gnt_list_entry);
315 goto out_of_memory;
316 }
317 gnt_list_entry->page = granted_page;
318 }
319
320 gnt_list_entry->gref = GRANT_INVALID_REF;
321 list_add(&gnt_list_entry->node, &rinfo->grants);
322 i++;
323 }
324
325 return 0;
326
327out_of_memory:
328 list_for_each_entry_safe(gnt_list_entry, n,
329 &rinfo->grants, node) {
330 list_del(&gnt_list_entry->node);
331 if (info->feature_persistent)
332 __free_page(gnt_list_entry->page);
333 kfree(gnt_list_entry);
334 i--;
335 }
336 BUG_ON(i != 0);
337 return -ENOMEM;
338}
339
340static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
341{
342 struct grant *gnt_list_entry;
343
344 BUG_ON(list_empty(&rinfo->grants));
345 gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
346 node);
347 list_del(&gnt_list_entry->node);
348
349 if (gnt_list_entry->gref != GRANT_INVALID_REF)
350 rinfo->persistent_gnts_c--;
351
352 return gnt_list_entry;
353}
354
355static inline void grant_foreign_access(const struct grant *gnt_list_entry,
356 const struct blkfront_info *info)
357{
358 gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
359 info->xbdev->otherend_id,
360 gnt_list_entry->page,
361 0);
362}
363
364static struct grant *get_grant(grant_ref_t *gref_head,
365 unsigned long gfn,
366 struct blkfront_ring_info *rinfo)
367{
368 struct grant *gnt_list_entry = get_free_grant(rinfo);
369 struct blkfront_info *info = rinfo->dev_info;
370
371 if (gnt_list_entry->gref != GRANT_INVALID_REF)
372 return gnt_list_entry;
373
374
375 gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
376 BUG_ON(gnt_list_entry->gref == -ENOSPC);
377 if (info->feature_persistent)
378 grant_foreign_access(gnt_list_entry, info);
379 else {
380
381 gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
382 info->xbdev->otherend_id,
383 gfn, 0);
384 }
385
386 return gnt_list_entry;
387}
388
389static struct grant *get_indirect_grant(grant_ref_t *gref_head,
390 struct blkfront_ring_info *rinfo)
391{
392 struct grant *gnt_list_entry = get_free_grant(rinfo);
393 struct blkfront_info *info = rinfo->dev_info;
394
395 if (gnt_list_entry->gref != GRANT_INVALID_REF)
396 return gnt_list_entry;
397
398
399 gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
400 BUG_ON(gnt_list_entry->gref == -ENOSPC);
401 if (!info->feature_persistent) {
402 struct page *indirect_page;
403
404
405 BUG_ON(list_empty(&rinfo->indirect_pages));
406 indirect_page = list_first_entry(&rinfo->indirect_pages,
407 struct page, lru);
408 list_del(&indirect_page->lru);
409 gnt_list_entry->page = indirect_page;
410 }
411 grant_foreign_access(gnt_list_entry, info);
412
413 return gnt_list_entry;
414}
415
416static const char *op_name(int op)
417{
418 static const char *const names[] = {
419 [BLKIF_OP_READ] = "read",
420 [BLKIF_OP_WRITE] = "write",
421 [BLKIF_OP_WRITE_BARRIER] = "barrier",
422 [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
423 [BLKIF_OP_DISCARD] = "discard" };
424
425 if (op < 0 || op >= ARRAY_SIZE(names))
426 return "unknown";
427
428 if (!names[op])
429 return "reserved";
430
431 return names[op];
432}
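
/*
 * Reserve a contiguous range of minor numbers for a new disk, growing the
 * global minors bitmap when the range does not fit yet.  Returns -EBUSY if
 * any minor in the range is already taken.
 */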
433static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
434{
435 unsigned int end = minor + nr;
436 int rc;
437
438 if (end > nr_minors) {
439 unsigned long *bitmap, *old;
440
441 bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
442 GFP_KERNEL);
443 if (bitmap == NULL)
444 return -ENOMEM;
445
446 spin_lock(&minor_lock);
447 if (end > nr_minors) {
448 old = minors;
449 memcpy(bitmap, minors,
450 BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
451 minors = bitmap;
452 nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
453 } else
454 old = bitmap;
455 spin_unlock(&minor_lock);
456 kfree(old);
457 }
458
459 spin_lock(&minor_lock);
460 if (find_next_bit(minors, end, minor) >= end) {
461 bitmap_set(minors, minor, nr);
462 rc = 0;
463 } else
464 rc = -EBUSY;
465 spin_unlock(&minor_lock);
466
467 return rc;
468}
469
470static void xlbd_release_minors(unsigned int minor, unsigned int nr)
471{
472 unsigned int end = minor + nr;
473
474 BUG_ON(end > nr_minors);
475 spin_lock(&minor_lock);
476 bitmap_clear(minors, minor, nr);
477 spin_unlock(&minor_lock);
478}
479
480static void blkif_restart_queue_callback(void *arg)
481{
482 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
483 schedule_work(&rinfo->work);
484}
485
486static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
487{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
490 sector_t nsect = get_capacity(bd->bd_disk);
491 sector_t cylinders = nsect;
492
493 hg->heads = 0xff;
494 hg->sectors = 0x3f;
495 sector_div(cylinders, hg->heads * hg->sectors);
496 hg->cylinders = cylinders;
497 if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
498 hg->cylinders = 0xffff;
499 return 0;
500}
501
502static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
503 unsigned command, unsigned long argument)
504{
505 struct blkfront_info *info = bdev->bd_disk->private_data;
506 int i;
507
508 dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
509 command, (long)argument);
510
511 switch (command) {
512 case CDROMMULTISESSION:
513 dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
514 for (i = 0; i < sizeof(struct cdrom_multisession); i++)
515 if (put_user(0, (char __user *)(argument + i)))
516 return -EFAULT;
517 return 0;
518
519 case CDROM_GET_CAPABILITY: {
520 struct gendisk *gd = info->gd;
521 if (gd->flags & GENHD_FL_CD)
522 return 0;
523 return -EINVAL;
524 }
525
526 default:
527
528
529 return -EINVAL;
530 }
531
532 return 0;
533}
534
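/*
 * Grab the next free slot on the ring together with a shadow entry used to
 * track the request, and return the shadow id.
 */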
535static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
536 struct request *req,
537 struct blkif_request **ring_req)
538{
539 unsigned long id;
540
541 *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
542 rinfo->ring.req_prod_pvt++;
543
544 id = get_id_from_freelist(rinfo);
545 rinfo->shadow[id].request = req;
546 rinfo->shadow[id].status = REQ_WAITING;
547 rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
548
549 (*ring_req)->u.rw.id = id;
550
551 return id;
552}
553
554static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
555{
556 struct blkfront_info *info = rinfo->dev_info;
557 struct blkif_request *ring_req;
558 unsigned long id;

	/* Fill out a communications ring structure. */
561 id = blkif_ring_get_request(rinfo, req, &ring_req);
562
563 ring_req->operation = BLKIF_OP_DISCARD;
564 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
565 ring_req->u.discard.id = id;
566 ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
567 if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
568 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
569 else
570 ring_req->u.discard.flag = 0;

	/* Keep a private copy so we can reissue requests when recovering. */
573 rinfo->shadow[id].req = *ring_req;
574
575 return 0;
576}
577
578struct setup_rw_req {
579 unsigned int grant_idx;
580 struct blkif_request_segment *segments;
581 struct blkfront_ring_info *rinfo;
582 struct blkif_request *ring_req;
583 grant_ref_t gref_head;
584 unsigned int id;
585
586 bool need_copy;
587 unsigned int bvec_off;
588 char *bvec_data;
589
590 bool require_extra_req;
591 struct blkif_request *extra_ring_req;
592};
593
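/*
 * Per-grant callback used while building a read/write request: claim a grant
 * for each granted frame, copy the payload into the shared page when
 * persistent grants are used for a write, and fill in the matching segment
 * descriptor (direct or indirect).
 */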
594static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
595 unsigned int len, void *data)
596{
597 struct setup_rw_req *setup = data;
598 int n, ref;
599 struct grant *gnt_list_entry;
600 unsigned int fsect, lsect;
601
602 unsigned int grant_idx = setup->grant_idx;
603 struct blkif_request *ring_req = setup->ring_req;
604 struct blkfront_ring_info *rinfo = setup->rinfo;
605
	/*
	 * We always use the shadow of the first request to store the list
	 * of grants associated with the block I/O request. This makes the
	 * completion easier to handle even if the block I/O request is
	 * split.
	 */
	struct blk_shadow *shadow = &rinfo->shadow[setup->id];
612
613 if (unlikely(setup->require_extra_req &&
614 grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
615
616
617
618
619 grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
620 ring_req = setup->extra_ring_req;
621 }
622
623 if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
624 (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
625 if (setup->segments)
626 kunmap_atomic(setup->segments);
627
628 n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
629 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
630 shadow->indirect_grants[n] = gnt_list_entry;
631 setup->segments = kmap_atomic(gnt_list_entry->page);
632 ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
633 }
634
635 gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
636 ref = gnt_list_entry->gref;
637
638
639
640
641 shadow->grants_used[setup->grant_idx] = gnt_list_entry;
642
643 if (setup->need_copy) {
644 void *shared_data;
645
646 shared_data = kmap_atomic(gnt_list_entry->page);
647
648
649
650
651
652
653
654
655
656 memcpy(shared_data + offset,
657 setup->bvec_data + setup->bvec_off,
658 len);
659
660 kunmap_atomic(shared_data);
661 setup->bvec_off += len;
662 }
663
664 fsect = offset >> 9;
665 lsect = fsect + (len >> 9) - 1;
666 if (ring_req->operation != BLKIF_OP_INDIRECT) {
667 ring_req->u.rw.seg[grant_idx] =
668 (struct blkif_request_segment) {
669 .gref = ref,
670 .first_sect = fsect,
671 .last_sect = lsect };
672 } else {
673 setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
674 (struct blkif_request_segment) {
675 .gref = ref,
676 .first_sect = fsect,
677 .last_sect = lsect };
678 }
679
680 (setup->grant_idx)++;
681}
682
683static void blkif_setup_extra_req(struct blkif_request *first,
684 struct blkif_request *second)
685{
686 uint16_t nr_segments = first->u.rw.nr_segments;
687
	/*
	 * The second request is only present when the first request uses
	 * all its segments. It's always the continuation of the first one.
	 */
	first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
693
694 second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
695 second->u.rw.sector_number = first->u.rw.sector_number +
696 (BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
697
698 second->u.rw.handle = first->u.rw.handle;
699 second->operation = first->operation;
700}
701
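/*
 * Build one ring request (or two, for large requests on backends without
 * indirect descriptors) for a read/write block layer request.  Returns 1 if
 * not enough grant references are available; in that case a free callback is
 * registered and the request is retried later.
 */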
702static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
703{
704 struct blkfront_info *info = rinfo->dev_info;
705 struct blkif_request *ring_req, *extra_ring_req = NULL;
706 unsigned long id, extra_id = NO_ASSOCIATED_ID;
707 bool require_extra_req = false;
708 int i;
709 struct setup_rw_req setup = {
710 .grant_idx = 0,
711 .segments = NULL,
712 .rinfo = rinfo,
713 .need_copy = rq_data_dir(req) && info->feature_persistent,
714 };
	/*
	 * Used to store if we are able to queue the request by just using
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	bool new_persistent_gnts = false;
722 struct scatterlist *sg;
723 int num_sg, max_grefs, num_grant;
724
725 max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
726 if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(max_grefs);
732
733
734 if (rinfo->persistent_gnts_c < max_grefs) {
735 new_persistent_gnts = true;
736
737 if (gnttab_alloc_grant_references(
738 max_grefs - rinfo->persistent_gnts_c,
739 &setup.gref_head) < 0) {
740 gnttab_request_free_callback(
741 &rinfo->callback,
742 blkif_restart_queue_callback,
743 rinfo,
744 max_grefs - rinfo->persistent_gnts_c);
745 return 1;
746 }
747 }

	/* Fill out a communications ring structure. */
750 id = blkif_ring_get_request(rinfo, req, &ring_req);
751
752 num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
753 num_grant = 0;
754
755 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
756 num_grant += gnttab_count_grant(sg->offset, sg->length);
757
758 require_extra_req = info->max_indirect_segments == 0 &&
759 num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
760 BUG_ON(!HAS_EXTRA_REQ && require_extra_req);
761
762 rinfo->shadow[id].num_sg = num_sg;
763 if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
764 likely(!require_extra_req)) {
		/*
		 * The indirect operation can only be a BLKIF_OP_READ or
		 * BLKIF_OP_WRITE
		 */
769 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
770 ring_req->operation = BLKIF_OP_INDIRECT;
771 ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
772 BLKIF_OP_WRITE : BLKIF_OP_READ;
773 ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
774 ring_req->u.indirect.handle = info->handle;
775 ring_req->u.indirect.nr_segments = num_grant;
776 } else {
777 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
778 ring_req->u.rw.handle = info->handle;
779 ring_req->operation = rq_data_dir(req) ?
780 BLKIF_OP_WRITE : BLKIF_OP_READ;
781 if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
			/*
			 * Ideally we can do an unordered flush-to-disk.
			 * In case the backend only supports barriers, use
			 * that.  A barrier request is a superset of FUA, so
			 * we can implement it the same way.  (It's also a
			 * FLUSH+FUA, since it is guaranteed ordered WRT
			 * previous writes.)
			 */
789 if (info->feature_flush && info->feature_fua)
790 ring_req->operation =
791 BLKIF_OP_WRITE_BARRIER;
792 else if (info->feature_flush)
793 ring_req->operation =
794 BLKIF_OP_FLUSH_DISKCACHE;
795 else
796 ring_req->operation = 0;
797 }
798 ring_req->u.rw.nr_segments = num_grant;
799 if (unlikely(require_extra_req)) {
800 extra_id = blkif_ring_get_request(rinfo, req,
801 &extra_ring_req);
			/*
			 * Only the first request contains the scatter-gather
			 * list.
			 */
			rinfo->shadow[extra_id].num_sg = 0;
807
808 blkif_setup_extra_req(ring_req, extra_ring_req);
809
810
811 rinfo->shadow[extra_id].associated_id = id;
812 rinfo->shadow[id].associated_id = extra_id;
813 }
814 }
815
816 setup.ring_req = ring_req;
817 setup.id = id;
818
819 setup.require_extra_req = require_extra_req;
820 if (unlikely(require_extra_req))
821 setup.extra_ring_req = extra_ring_req;
822
823 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
824 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
825
826 if (setup.need_copy) {
827 setup.bvec_off = sg->offset;
828 setup.bvec_data = kmap_atomic(sg_page(sg));
829 }
830
831 gnttab_foreach_grant_in_range(sg_page(sg),
832 sg->offset,
833 sg->length,
834 blkif_setup_rw_req_grant,
835 &setup);
836
837 if (setup.need_copy)
838 kunmap_atomic(setup.bvec_data);
839 }
840 if (setup.segments)
841 kunmap_atomic(setup.segments);

	/* Keep a private copy so we can reissue requests when recovering. */
844 rinfo->shadow[id].req = *ring_req;
845 if (unlikely(require_extra_req))
846 rinfo->shadow[extra_id].req = *extra_ring_req;
847
848 if (new_persistent_gnts)
849 gnttab_free_grant_references(setup.gref_head);
850
851 return 0;
852}

/*
 * Generate a Xen blkfront I/O request from a block layer request and queue
 * it on the ring.  Returns 1 if the request could not be queued (device not
 * connected or out of grant references), 0 otherwise.
 */
860static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
861{
862 if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
863 return 1;
864
865 if (unlikely(req_op(req) == REQ_OP_DISCARD ||
866 req_op(req) == REQ_OP_SECURE_ERASE))
867 return blkif_queue_discard_req(req, rinfo);
868 else
869 return blkif_queue_rw_req(req, rinfo);
870}
871
872static inline void flush_requests(struct blkfront_ring_info *rinfo)
873{
874 int notify;
875
876 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
877
878 if (notify)
879 notify_remote_via_irq(rinfo->irq);
880}
881
882static inline bool blkif_request_flush_invalid(struct request *req,
883 struct blkfront_info *info)
884{
885 return (blk_rq_is_passthrough(req) ||
886 ((req_op(req) == REQ_OP_FLUSH) &&
887 !info->feature_flush) ||
888 ((req->cmd_flags & REQ_FUA) &&
889 !info->feature_fua));
890}
891
892static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
893 const struct blk_mq_queue_data *qd)
894{
895 unsigned long flags;
896 int qid = hctx->queue_num;
897 struct blkfront_info *info = hctx->queue->queuedata;
898 struct blkfront_ring_info *rinfo = NULL;
899
900 rinfo = get_rinfo(info, qid);
901 blk_mq_start_request(qd->rq);
902 spin_lock_irqsave(&rinfo->ring_lock, flags);
903 if (RING_FULL(&rinfo->ring))
904 goto out_busy;
905
906 if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
907 goto out_err;
908
909 if (blkif_queue_request(qd->rq, rinfo))
910 goto out_busy;
911
912 flush_requests(rinfo);
913 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
914 return BLK_STS_OK;
915
916out_err:
917 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
918 return BLK_STS_IOERR;
919
920out_busy:
921 blk_mq_stop_hw_queue(hctx);
922 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
923 return BLK_STS_DEV_RESOURCE;
924}
925
926static void blkif_complete_rq(struct request *rq)
927{
928 blk_mq_end_request(rq, blkif_req(rq)->error);
929}
930
931static const struct blk_mq_ops blkfront_mq_ops = {
932 .queue_rq = blkif_queue_rq,
933 .complete = blkif_complete_rq,
934};
935
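/* Apply the negotiated backend features and limits to the request queue. */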
936static void blkif_set_queue_limits(struct blkfront_info *info)
937{
938 struct request_queue *rq = info->rq;
939 struct gendisk *gd = info->gd;
940 unsigned int segments = info->max_indirect_segments ? :
941 BLKIF_MAX_SEGMENTS_PER_REQUEST;
942
943 blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
944
945 if (info->feature_discard) {
946 blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
947 blk_queue_max_discard_sectors(rq, get_capacity(gd));
948 rq->limits.discard_granularity = info->discard_granularity;
949 rq->limits.discard_alignment = info->discard_alignment;
950 if (info->feature_secdiscard)
951 blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
952 }
953
954
955 blk_queue_logical_block_size(rq, info->sector_size);
956 blk_queue_physical_block_size(rq, info->physical_sector_size);
957 blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
958
959
960 blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
961 blk_queue_max_segment_size(rq, PAGE_SIZE);
962
963
964 blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
965
966
967 blk_queue_dma_alignment(rq, 511);
968}
969
970static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
971 unsigned int physical_sector_size)
972{
973 struct request_queue *rq;
974 struct blkfront_info *info = gd->private_data;
975
976 memset(&info->tag_set, 0, sizeof(info->tag_set));
977 info->tag_set.ops = &blkfront_mq_ops;
978 info->tag_set.nr_hw_queues = info->nr_rings;
979 if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
		/*
		 * When indirect descriptors are not supported, an I/O request
		 * may be split between multiple requests on the ring.
		 * To avoid problems when sending the request, divide the
		 * depth of the queue by 2.
		 */
986 info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
987 } else
988 info->tag_set.queue_depth = BLK_RING_SIZE(info);
989 info->tag_set.numa_node = NUMA_NO_NODE;
990 info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
991 info->tag_set.cmd_size = sizeof(struct blkif_req);
992 info->tag_set.driver_data = info;
993
994 if (blk_mq_alloc_tag_set(&info->tag_set))
995 return -EINVAL;
996 rq = blk_mq_init_queue(&info->tag_set);
997 if (IS_ERR(rq)) {
998 blk_mq_free_tag_set(&info->tag_set);
999 return PTR_ERR(rq);
1000 }
1001
1002 rq->queuedata = info;
1003 info->rq = gd->queue = rq;
1004 info->gd = gd;
1005 info->sector_size = sector_size;
1006 info->physical_sector_size = physical_sector_size;
1007 blkif_set_queue_limits(info);
1008
1009 return 0;
1010}
1011
1012static const char *flush_info(struct blkfront_info *info)
1013{
1014 if (info->feature_flush && info->feature_fua)
1015 return "barrier: enabled;";
1016 else if (info->feature_flush)
1017 return "flush diskcache: enabled;";
1018 else
1019 return "barrier or flush: disabled;";
1020}
1021
1022static void xlvbd_flush(struct blkfront_info *info)
1023{
1024 blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
1025 info->feature_fua ? true : false);
1026 pr_info("blkfront: %s: %s %s %s %s %s\n",
1027 info->gd->disk_name, flush_info(info),
1028 "persistent grants:", info->feature_persistent ?
1029 "enabled;" : "disabled;", "indirect descriptors:",
1030 info->max_indirect_segments ? "enabled;" : "disabled;");
1031}
1032
1033static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
1034{
1035 int major;
1036 major = BLKIF_MAJOR(vdevice);
1037 *minor = BLKIF_MINOR(vdevice);
1038 switch (major) {
1039 case XEN_IDE0_MAJOR:
1040 *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
1041 *minor = ((*minor / 64) * PARTS_PER_DISK) +
1042 EMULATED_HD_DISK_MINOR_OFFSET;
1043 break;
1044 case XEN_IDE1_MAJOR:
1045 *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
1046 *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
1047 EMULATED_HD_DISK_MINOR_OFFSET;
1048 break;
1049 case XEN_SCSI_DISK0_MAJOR:
1050 *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
1051 *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
1052 break;
1053 case XEN_SCSI_DISK1_MAJOR:
1054 case XEN_SCSI_DISK2_MAJOR:
1055 case XEN_SCSI_DISK3_MAJOR:
1056 case XEN_SCSI_DISK4_MAJOR:
1057 case XEN_SCSI_DISK5_MAJOR:
1058 case XEN_SCSI_DISK6_MAJOR:
1059 case XEN_SCSI_DISK7_MAJOR:
1060 *offset = (*minor / PARTS_PER_DISK) +
1061 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
1062 EMULATED_SD_DISK_NAME_OFFSET;
1063 *minor = *minor +
1064 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
1065 EMULATED_SD_DISK_MINOR_OFFSET;
1066 break;
1067 case XEN_SCSI_DISK8_MAJOR:
1068 case XEN_SCSI_DISK9_MAJOR:
1069 case XEN_SCSI_DISK10_MAJOR:
1070 case XEN_SCSI_DISK11_MAJOR:
1071 case XEN_SCSI_DISK12_MAJOR:
1072 case XEN_SCSI_DISK13_MAJOR:
1073 case XEN_SCSI_DISK14_MAJOR:
1074 case XEN_SCSI_DISK15_MAJOR:
1075 *offset = (*minor / PARTS_PER_DISK) +
1076 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
1077 EMULATED_SD_DISK_NAME_OFFSET;
1078 *minor = *minor +
1079 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
1080 EMULATED_SD_DISK_MINOR_OFFSET;
1081 break;
1082 case XENVBD_MAJOR:
1083 *offset = *minor / PARTS_PER_DISK;
1084 break;
1085 default:
1086 printk(KERN_WARNING "blkfront: your disk configuration is "
1087 "incorrect, please use an xvd device instead\n");
1088 return -ENODEV;
1089 }
1090 return 0;
1091}
1092
1093static char *encode_disk_name(char *ptr, unsigned int n)
1094{
1095 if (n >= 26)
1096 ptr = encode_disk_name(ptr, n / 26 - 1);
1097 *ptr = 'a' + n % 26;
1098 return ptr + 1;
1099}
1100
1101static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
1102 struct blkfront_info *info,
1103 u16 vdisk_info, u16 sector_size,
1104 unsigned int physical_sector_size)
1105{
1106 struct gendisk *gd;
1107 int nr_minors = 1;
1108 int err;
1109 unsigned int offset;
1110 int minor;
1111 int nr_parts;
1112 char *ptr;
1113
1114 BUG_ON(info->gd != NULL);
1115 BUG_ON(info->rq != NULL);
1116
1117 if ((info->vdevice>>EXT_SHIFT) > 1) {
1118
1119 printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
1120 return -ENODEV;
1121 }
1122
1123 if (!VDEV_IS_EXTENDED(info->vdevice)) {
1124 err = xen_translate_vdev(info->vdevice, &minor, &offset);
1125 if (err)
1126 return err;
1127 nr_parts = PARTS_PER_DISK;
1128 } else {
1129 minor = BLKIF_MINOR_EXT(info->vdevice);
1130 nr_parts = PARTS_PER_EXT_DISK;
1131 offset = minor / nr_parts;
1132 if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
					"emulated IDE disks,\n\t choose an xvd device name "
					"from xvde on\n", info->vdevice);
1136 }
1137 if (minor >> MINORBITS) {
1138 pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
1139 info->vdevice, minor);
1140 return -ENODEV;
1141 }
1142
1143 if ((minor % nr_parts) == 0)
1144 nr_minors = nr_parts;
1145
1146 err = xlbd_reserve_minors(minor, nr_minors);
1147 if (err)
1148 goto out;
1149 err = -ENODEV;
1150
1151 gd = alloc_disk(nr_minors);
1152 if (gd == NULL)
1153 goto release;
1154
1155 strcpy(gd->disk_name, DEV_NAME);
1156 ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
1157 BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
1158 if (nr_minors > 1)
1159 *ptr = 0;
1160 else
1161 snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
1162 "%d", minor & (nr_parts - 1));
1163
1164 gd->major = XENVBD_MAJOR;
1165 gd->first_minor = minor;
1166 gd->fops = &xlvbd_block_fops;
1167 gd->private_data = info;
1168 set_capacity(gd, capacity);
1169
1170 if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
1171 del_gendisk(gd);
1172 goto release;
1173 }
1174
1175 xlvbd_flush(info);
1176
1177 if (vdisk_info & VDISK_READONLY)
1178 set_disk_ro(gd, 1);
1179
1180 if (vdisk_info & VDISK_REMOVABLE)
1181 gd->flags |= GENHD_FL_REMOVABLE;
1182
1183 if (vdisk_info & VDISK_CDROM)
1184 gd->flags |= GENHD_FL_CD;
1185
1186 return 0;
1187
1188 release:
1189 xlbd_release_minors(minor, nr_minors);
1190 out:
1191 return err;
1192}
1193
1194static void xlvbd_release_gendisk(struct blkfront_info *info)
1195{
1196 unsigned int minor, nr_minors, i;
1197 struct blkfront_ring_info *rinfo;
1198
1199 if (info->rq == NULL)
1200 return;

	/* No more blkif_request(). */
	blk_mq_stop_hw_queues(info->rq);
1204
	for_each_rinfo(info, rinfo, i) {
		/* No more gnttab callback work. */
		gnttab_cancel_free_callback(&rinfo->callback);

		/* Flush gnttab callback work. Must be done with no locks held. */
		flush_work(&rinfo->work);
	}
1212
1213 del_gendisk(info->gd);
1214
1215 minor = info->gd->first_minor;
1216 nr_minors = info->gd->minors;
1217 xlbd_release_minors(minor, nr_minors);
1218
1219 blk_cleanup_queue(info->rq);
1220 blk_mq_free_tag_set(&info->tag_set);
1221 info->rq = NULL;
1222
1223 put_disk(info->gd);
1224 info->gd = NULL;
1225}
1226
1227
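/* Must be called with the per-ring lock held. */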
1228static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
1229{
1230 if (!RING_FULL(&rinfo->ring))
1231 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
1232}
1233
1234static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
1235{
1236 unsigned long flags;
1237
1238 spin_lock_irqsave(&rinfo->ring_lock, flags);
1239 kick_pending_request_queues_locked(rinfo);
1240 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1241}
1242
1243static void blkif_restart_queue(struct work_struct *work)
1244{
1245 struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
1246
1247 if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
1248 kick_pending_request_queues(rinfo);
1249}
1250
1251static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1252{
1253 struct grant *persistent_gnt, *n;
1254 struct blkfront_info *info = rinfo->dev_info;
1255 int i, j, segs;

	/*
	 * Remove indirect pages; this only happens when using indirect
	 * descriptors but not persistent grants.
	 */
1261 if (!list_empty(&rinfo->indirect_pages)) {
1262 struct page *indirect_page, *n;
1263
1264 BUG_ON(info->feature_persistent);
1265 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
1266 list_del(&indirect_page->lru);
1267 __free_page(indirect_page);
1268 }
1269 }
1270
1271
1272 if (!list_empty(&rinfo->grants)) {
1273 list_for_each_entry_safe(persistent_gnt, n,
1274 &rinfo->grants, node) {
1275 list_del(&persistent_gnt->node);
1276 if (persistent_gnt->gref != GRANT_INVALID_REF) {
1277 gnttab_end_foreign_access(persistent_gnt->gref,
1278 0, 0UL);
1279 rinfo->persistent_gnts_c--;
1280 }
1281 if (info->feature_persistent)
1282 __free_page(persistent_gnt->page);
1283 kfree(persistent_gnt);
1284 }
1285 }
1286 BUG_ON(rinfo->persistent_gnts_c != 0);
1287
1288 for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
1293 if (!rinfo->shadow[i].request)
1294 goto free_shadow;
1295
1296 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
1297 rinfo->shadow[i].req.u.indirect.nr_segments :
1298 rinfo->shadow[i].req.u.rw.nr_segments;
1299 for (j = 0; j < segs; j++) {
1300 persistent_gnt = rinfo->shadow[i].grants_used[j];
1301 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1302 if (info->feature_persistent)
1303 __free_page(persistent_gnt->page);
1304 kfree(persistent_gnt);
1305 }
1306
1307 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;
1313
1314 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
1315 persistent_gnt = rinfo->shadow[i].indirect_grants[j];
1316 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1317 __free_page(persistent_gnt->page);
1318 kfree(persistent_gnt);
1319 }
1320
1321free_shadow:
1322 kvfree(rinfo->shadow[i].grants_used);
1323 rinfo->shadow[i].grants_used = NULL;
1324 kvfree(rinfo->shadow[i].indirect_grants);
1325 rinfo->shadow[i].indirect_grants = NULL;
1326 kvfree(rinfo->shadow[i].sg);
1327 rinfo->shadow[i].sg = NULL;
1328 }
1329
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&rinfo->callback);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&rinfo->work);

	/* Free resources associated with old device channel. */
	for (i = 0; i < info->nr_ring_pages; i++) {
1338 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
1339 gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
1340 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1341 }
1342 }
1343 free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
1344 rinfo->ring.sring = NULL;
1345
1346 if (rinfo->irq)
1347 unbind_from_irqhandler(rinfo->irq, rinfo);
1348 rinfo->evtchn = rinfo->irq = 0;
1349}
1350
1351static void blkif_free(struct blkfront_info *info, int suspend)
1352{
1353 unsigned int i;
1354 struct blkfront_ring_info *rinfo;
1355
	/* Prevent new requests being issued until we fix things up. */
1357 info->connected = suspend ?
1358 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
1359
1360 if (info->rq)
1361 blk_mq_stop_hw_queues(info->rq);
1362
1363 for_each_rinfo(info, rinfo, i)
1364 blkif_free_ring(rinfo);
1365
1366 kvfree(info->rinfo);
1367 info->rinfo = NULL;
1368 info->nr_rings = 0;
1369}
1370
1371struct copy_from_grant {
1372 const struct blk_shadow *s;
1373 unsigned int grant_idx;
1374 unsigned int bvec_offset;
1375 char *bvec_data;
1376};
1377
1378static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
1379 unsigned int len, void *data)
1380{
1381 struct copy_from_grant *info = data;
1382 char *shared_data;
1383
1384 const struct blk_shadow *s = info->s;
1385
1386 shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
1387
1388 memcpy(info->bvec_data + info->bvec_offset,
1389 shared_data + offset, len);
1390
1391 info->bvec_offset += len;
1392 info->grant_idx++;
1393
1394 kunmap_atomic(shared_data);
1395}
1396
1397static enum blk_req_status blkif_rsp_to_req_status(int rsp)
1398{
1399 switch (rsp)
1400 {
1401 case BLKIF_RSP_OKAY:
1402 return REQ_DONE;
1403 case BLKIF_RSP_EOPNOTSUPP:
1404 return REQ_EOPNOTSUPP;
1405 case BLKIF_RSP_ERROR:
1406
1407 default:
1408 return REQ_ERROR;
1409 }
1410}

/*
 * Return the combined status of the two halves of a split request.
 */
1415static int blkif_get_final_status(enum blk_req_status s1,
1416 enum blk_req_status s2)
1417{
1418 BUG_ON(s1 == REQ_WAITING);
1419 BUG_ON(s2 == REQ_WAITING);
1420
1421 if (s1 == REQ_ERROR || s2 == REQ_ERROR)
1422 return BLKIF_RSP_ERROR;
1423 else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
1424 return BLKIF_RSP_EOPNOTSUPP;
1425 return BLKIF_RSP_OKAY;
1426}
1427
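/*
 * Handle one ring response: combine the status of split requests, copy data
 * back from persistent grants for reads, and recycle or release the grants
 * used by the request.  Returns false when the other half of a split request
 * is still pending, in which case the block layer request must not be
 * completed yet.
 */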
1428static bool blkif_completion(unsigned long *id,
1429 struct blkfront_ring_info *rinfo,
1430 struct blkif_response *bret)
1431{
1432 int i = 0;
1433 struct scatterlist *sg;
1434 int num_sg, num_grant;
1435 struct blkfront_info *info = rinfo->dev_info;
1436 struct blk_shadow *s = &rinfo->shadow[*id];
1437 struct copy_from_grant data = {
1438 .grant_idx = 0,
1439 };
1440
1441 num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
1442 s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
1443
1444
1445 if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
1446 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
1447
1448
1449 s->status = blkif_rsp_to_req_status(bret->status);
1450
1451
1452 if (s2->status == REQ_WAITING)
1453 return false;
1454
1455 bret->status = blkif_get_final_status(s->status,
1456 s2->status);
1457
1458
1459
1460
1461
1462 num_grant += s2->req.u.rw.nr_segments;
1463
1464
1465
1466
1467
1468 if (s2->num_sg != 0) {
1469
1470 *id = s->associated_id;
1471 s = s2;
1472 }
1473
1474
1475
1476
1477
1478 if (add_id_to_freelist(rinfo, s->associated_id))
1479 WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
1480 info->gd->disk_name, s->associated_id);
1481 }
1482
1483 data.s = s;
1484 num_sg = s->num_sg;
1485
1486 if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
1487 for_each_sg(s->sg, sg, num_sg, i) {
1488 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
1489
1490 data.bvec_offset = sg->offset;
1491 data.bvec_data = kmap_atomic(sg_page(sg));
1492
1493 gnttab_foreach_grant_in_range(sg_page(sg),
1494 sg->offset,
1495 sg->length,
1496 blkif_copy_from_grant,
1497 &data);
1498
1499 kunmap_atomic(data.bvec_data);
1500 }
1501 }
1502
1503 for (i = 0; i < num_grant; i++) {
1504 if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
1511 if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
1514 list_add(&s->grants_used[i]->node, &rinfo->grants);
1515 rinfo->persistent_gnts_c++;
1516 } else {
			/*
			 * If the grant is not mapped by the backend we end the
			 * foreign access and add it to the tail of the list,
			 * so it will not be picked again unless we run out of
			 * persistent grants.
			 */
1523 gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
1524 s->grants_used[i]->gref = GRANT_INVALID_REF;
1525 list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
1526 }
1527 }
1528 if (s->req.operation == BLKIF_OP_INDIRECT) {
1529 for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
1530 if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
1531 if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
1534 list_add(&s->indirect_grants[i]->node, &rinfo->grants);
1535 rinfo->persistent_gnts_c++;
1536 } else {
1537 struct page *indirect_page;
1538
1539 gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
1540
1541
1542
1543
1544 if (!info->feature_persistent) {
1545 indirect_page = s->indirect_grants[i]->page;
1546 list_add(&indirect_page->lru, &rinfo->indirect_pages);
1547 }
1548 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1549 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
1550 }
1551 }
1552 }
1553
1554 return true;
1555}
1556
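/*
 * Event channel interrupt handler: consume all pending responses on the
 * ring, complete the corresponding block layer requests, and restart the
 * hardware queue if the ring has room again.
 */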
1557static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1558{
1559 struct request *req;
1560 struct blkif_response *bret;
1561 RING_IDX i, rp;
1562 unsigned long flags;
1563 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
1564 struct blkfront_info *info = rinfo->dev_info;
1565
1566 if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
1567 return IRQ_HANDLED;
1568
1569 spin_lock_irqsave(&rinfo->ring_lock, flags);
1570 again:
1571 rp = rinfo->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
1573
1574 for (i = rinfo->ring.rsp_cons; i != rp; i++) {
1575 unsigned long id;
1576
1577 bret = RING_GET_RESPONSE(&rinfo->ring, i);
1578 id = bret->id;
1579
		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE -
		 * look in get_id_from_freelist).
		 */
1584 if (id >= BLK_RING_SIZE(info)) {
1585 WARN(1, "%s: response to %s has incorrect id (%ld)\n",
1586 info->gd->disk_name, op_name(bret->operation), id);
1587
1588
1589 continue;
1590 }
1591 req = rinfo->shadow[id].request;
1592
1593 if (bret->operation != BLKIF_OP_DISCARD) {
			/*
			 * We may need to wait for an extra response if the
			 * I/O request is split in 2
			 */
1598 if (!blkif_completion(&id, rinfo, bret))
1599 continue;
1600 }
1601
1602 if (add_id_to_freelist(rinfo, id)) {
1603 WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
1604 info->gd->disk_name, op_name(bret->operation), id);
1605 continue;
1606 }
1607
1608 if (bret->status == BLKIF_RSP_OKAY)
1609 blkif_req(req)->error = BLK_STS_OK;
1610 else
1611 blkif_req(req)->error = BLK_STS_IOERR;
1612
1613 switch (bret->operation) {
1614 case BLKIF_OP_DISCARD:
1615 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1616 struct request_queue *rq = info->rq;
1617 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1618 info->gd->disk_name, op_name(bret->operation));
1619 blkif_req(req)->error = BLK_STS_NOTSUPP;
1620 info->feature_discard = 0;
1621 info->feature_secdiscard = 0;
1622 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
1623 blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
1624 }
1625 break;
1626 case BLKIF_OP_FLUSH_DISKCACHE:
1627 case BLKIF_OP_WRITE_BARRIER:
1628 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1629 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1630 info->gd->disk_name, op_name(bret->operation));
1631 blkif_req(req)->error = BLK_STS_NOTSUPP;
1632 }
1633 if (unlikely(bret->status == BLKIF_RSP_ERROR &&
1634 rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1635 printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
1636 info->gd->disk_name, op_name(bret->operation));
1637 blkif_req(req)->error = BLK_STS_NOTSUPP;
1638 }
1639 if (unlikely(blkif_req(req)->error)) {
1640 if (blkif_req(req)->error == BLK_STS_NOTSUPP)
1641 blkif_req(req)->error = BLK_STS_OK;
1642 info->feature_fua = 0;
1643 info->feature_flush = 0;
1644 xlvbd_flush(info);
1645 }
			/* fall through */
1647 case BLKIF_OP_READ:
1648 case BLKIF_OP_WRITE:
1649 if (unlikely(bret->status != BLKIF_RSP_OKAY))
1650 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
1651 "request: %x\n", bret->status);
1652
1653 break;
1654 default:
1655 BUG();
1656 }
1657
1658 blk_mq_complete_request(req);
1659 }
1660
1661 rinfo->ring.rsp_cons = i;
1662
1663 if (i != rinfo->ring.req_prod_pvt) {
1664 int more_to_do;
1665 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
1666 if (more_to_do)
1667 goto again;
1668 } else
1669 rinfo->ring.sring->rsp_event = i + 1;
1670
1671 kick_pending_request_queues_locked(rinfo);
1672
1673 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1674
1675 return IRQ_HANDLED;
1676}
1677
1678
1679static int setup_blkring(struct xenbus_device *dev,
1680 struct blkfront_ring_info *rinfo)
1681{
1682 struct blkif_sring *sring;
1683 int err, i;
1684 struct blkfront_info *info = rinfo->dev_info;
1685 unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
1686 grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
1687
1688 for (i = 0; i < info->nr_ring_pages; i++)
1689 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1690
1691 sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
1692 get_order(ring_size));
1693 if (!sring) {
1694 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
1695 return -ENOMEM;
1696 }
1697 SHARED_RING_INIT(sring);
1698 FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
1699
1700 err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
1701 if (err < 0) {
1702 free_pages((unsigned long)sring, get_order(ring_size));
1703 rinfo->ring.sring = NULL;
1704 goto fail;
1705 }
1706 for (i = 0; i < info->nr_ring_pages; i++)
1707 rinfo->ring_ref[i] = gref[i];
1708
1709 err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
1710 if (err)
1711 goto fail;
1712
1713 err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
1714 "blkif", rinfo);
1715 if (err <= 0) {
1716 xenbus_dev_fatal(dev, err,
1717 "bind_evtchn_to_irqhandler failed");
1718 goto fail;
1719 }
1720 rinfo->irq = err;
1721
1722 return 0;
1723fail:
1724 blkif_free(info, 0);
1725 return err;
1726}
1727

/*
 * Write out the per-ring/queue nodes, including ring-ref(s) and
 * event-channel; each ring buffer may span multiple pages depending on
 * ->nr_ring_pages.
 */
1732static int write_per_ring_nodes(struct xenbus_transaction xbt,
1733 struct blkfront_ring_info *rinfo, const char *dir)
1734{
1735 int err;
1736 unsigned int i;
1737 const char *message = NULL;
1738 struct blkfront_info *info = rinfo->dev_info;
1739
1740 if (info->nr_ring_pages == 1) {
1741 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
1742 if (err) {
1743 message = "writing ring-ref";
1744 goto abort_transaction;
1745 }
1746 } else {
1747 for (i = 0; i < info->nr_ring_pages; i++) {
1748 char ring_ref_name[RINGREF_NAME_LEN];
1749
1750 snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
1751 err = xenbus_printf(xbt, dir, ring_ref_name,
1752 "%u", rinfo->ring_ref[i]);
1753 if (err) {
1754 message = "writing ring-ref";
1755 goto abort_transaction;
1756 }
1757 }
1758 }
1759
1760 err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
1761 if (err) {
1762 message = "writing event-channel";
1763 goto abort_transaction;
1764 }
1765
1766 return 0;
1767
1768abort_transaction:
1769 xenbus_transaction_end(xbt, 1);
1770 if (message)
1771 xenbus_dev_fatal(info->xbdev, err, "%s", message);
1772
1773 return err;
1774}
1775
1776static void free_info(struct blkfront_info *info)
1777{
1778 list_del(&info->info_list);
1779 kfree(info);
1780}
1781
/* Common code used when first setting up, and when resuming. */
1783static int talk_to_blkback(struct xenbus_device *dev,
1784 struct blkfront_info *info)
1785{
1786 const char *message = NULL;
1787 struct xenbus_transaction xbt;
1788 int err;
1789 unsigned int i, max_page_order;
1790 unsigned int ring_page_order;
1791 struct blkfront_ring_info *rinfo;
1792
1793 if (!info)
1794 return -ENODEV;
1795
1796 max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
1797 "max-ring-page-order", 0);
1798 ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1799 info->nr_ring_pages = 1 << ring_page_order;
1800
1801 err = negotiate_mq(info);
1802 if (err)
1803 goto destroy_blkring;
1804
1805 for_each_rinfo(info, rinfo, i) {
		/* Create shared ring, alloc event channel. */
1807 err = setup_blkring(dev, rinfo);
1808 if (err)
1809 goto destroy_blkring;
1810 }
1811
1812again:
1813 err = xenbus_transaction_start(&xbt);
1814 if (err) {
1815 xenbus_dev_fatal(dev, err, "starting transaction");
1816 goto destroy_blkring;
1817 }
1818
1819 if (info->nr_ring_pages > 1) {
1820 err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
1821 ring_page_order);
1822 if (err) {
1823 message = "writing ring-page-order";
1824 goto abort_transaction;
1825 }
1826 }
1827
1828
1829 if (info->nr_rings == 1) {
1830 err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
1831 if (err)
1832 goto destroy_blkring;
1833 } else {
1834 char *path;
1835 size_t pathsize;
1836
1837 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
1838 info->nr_rings);
1839 if (err) {
1840 message = "writing multi-queue-num-queues";
1841 goto abort_transaction;
1842 }
1843
1844 pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
1845 path = kmalloc(pathsize, GFP_KERNEL);
1846 if (!path) {
1847 err = -ENOMEM;
1848 message = "ENOMEM while writing ring references";
1849 goto abort_transaction;
1850 }
1851
1852 for_each_rinfo(info, rinfo, i) {
1853 memset(path, 0, pathsize);
1854 snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
1855 err = write_per_ring_nodes(xbt, rinfo, path);
1856 if (err) {
1857 kfree(path);
1858 goto destroy_blkring;
1859 }
1860 }
1861 kfree(path);
1862 }
1863 err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
1864 XEN_IO_PROTO_ABI_NATIVE);
1865 if (err) {
1866 message = "writing protocol";
1867 goto abort_transaction;
1868 }
1869 err = xenbus_printf(xbt, dev->nodename,
1870 "feature-persistent", "%u", 1);
1871 if (err)
1872 dev_warn(&dev->dev,
1873 "writing persistent grants feature to xenbus");
1874
1875 err = xenbus_transaction_end(xbt, 0);
1876 if (err) {
1877 if (err == -EAGAIN)
1878 goto again;
1879 xenbus_dev_fatal(dev, err, "completing transaction");
1880 goto destroy_blkring;
1881 }
1882
1883 for_each_rinfo(info, rinfo, i) {
1884 unsigned int j;
1885
1886 for (j = 0; j < BLK_RING_SIZE(info); j++)
1887 rinfo->shadow[j].req.u.rw.id = j + 1;
1888 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1889 }
1890 xenbus_switch_state(dev, XenbusStateInitialised);
1891
1892 return 0;
1893
1894 abort_transaction:
1895 xenbus_transaction_end(xbt, 1);
1896 if (message)
1897 xenbus_dev_fatal(dev, err, "%s", message);
1898 destroy_blkring:
1899 blkif_free(info, 0);
1900
1901 mutex_lock(&blkfront_mutex);
1902 free_info(info);
1903 mutex_unlock(&blkfront_mutex);
1904
1905 dev_set_drvdata(&dev->dev, NULL);
1906
1907 return err;
1908}
1909
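/*
 * Negotiate the number of hardware queues/rings with the backend and
 * allocate the per-ring state.
 */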
1910static int negotiate_mq(struct blkfront_info *info)
1911{
1912 unsigned int backend_max_queues;
1913 unsigned int i;
1914 struct blkfront_ring_info *rinfo;
1915
1916 BUG_ON(info->nr_rings);
1917
	/* Check if backend supports multiple queues. */
1919 backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1920 "multi-queue-max-queues", 1);
1921 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1922
1923 if (!info->nr_rings)
1924 info->nr_rings = 1;
1925
1926 info->rinfo_size = struct_size(info->rinfo, shadow,
1927 BLK_RING_SIZE(info));
1928 info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
1929 if (!info->rinfo) {
1930 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1931 info->nr_rings = 0;
1932 return -ENOMEM;
1933 }
1934
1935 for_each_rinfo(info, rinfo, i) {
1936 INIT_LIST_HEAD(&rinfo->indirect_pages);
1937 INIT_LIST_HEAD(&rinfo->grants);
1938 rinfo->dev_info = info;
1939 INIT_WORK(&rinfo->work, blkif_restart_queue);
1940 spin_lock_init(&rinfo->ring_lock);
1941 }
1942 return 0;
1943}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and save them in the xenbus device data; the rings are set up
 * later, in talk_to_blkback(), once the backend is ready.
 */
1950static int blkfront_probe(struct xenbus_device *dev,
1951 const struct xenbus_device_id *id)
1952{
1953 int err, vdevice;
1954 struct blkfront_info *info;
1955
1956
1957 err = xenbus_scanf(XBT_NIL, dev->nodename,
1958 "virtual-device", "%i", &vdevice);
1959 if (err != 1) {
1960
1961 err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
1962 "%i", &vdevice);
1963 if (err != 1) {
1964 xenbus_dev_fatal(dev, err, "reading virtual-device");
1965 return err;
1966 }
1967 }
1968
1969 if (xen_hvm_domain()) {
1970 char *type;
1971 int len;
1972
1973 if (xen_has_pv_and_legacy_disk_devices()) {
1974 int major;
1975
1976 if (!VDEV_IS_EXTENDED(vdevice))
1977 major = BLKIF_MAJOR(vdevice);
1978 else
1979 major = XENVBD_MAJOR;
1980
1981 if (major != XENVBD_MAJOR) {
1982 printk(KERN_INFO
1983 "%s: HVM does not support vbd %d as xen block device\n",
1984 __func__, vdevice);
1985 return -ENODEV;
1986 }
1987 }
1988
1989 type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
1990 if (IS_ERR(type))
1991 return -ENODEV;
1992 if (strncmp(type, "cdrom", 5) == 0) {
1993 kfree(type);
1994 return -ENODEV;
1995 }
1996 kfree(type);
1997 }
1998 info = kzalloc(sizeof(*info), GFP_KERNEL);
1999 if (!info) {
2000 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
2001 return -ENOMEM;
2002 }
2003
2004 info->xbdev = dev;
2005
2006 mutex_init(&info->mutex);
2007 info->vdevice = vdevice;
2008 info->connected = BLKIF_STATE_DISCONNECTED;
2009
	/* Front end dir is a number, which is used as the id. */
2011 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
2012 dev_set_drvdata(&dev->dev, info);
2013
2014 mutex_lock(&blkfront_mutex);
2015 list_add(&info->info_list, &info_list);
2016 mutex_unlock(&blkfront_mutex);
2017
2018 return 0;
2019}
2020
2021static int blkif_recover(struct blkfront_info *info)
2022{
2023 unsigned int r_index;
2024 struct request *req, *n;
2025 int rc;
2026 struct bio *bio;
2027 unsigned int segs;
2028 struct blkfront_ring_info *rinfo;
2029
2030 blkfront_gather_backend_features(info);
2031
2032 blkif_set_queue_limits(info);
2033 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
2034 blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
2035
2036 for_each_rinfo(info, rinfo, r_index) {
2037 rc = blkfront_setup_indirect(rinfo);
2038 if (rc)
2039 return rc;
2040 }
2041 xenbus_switch_state(info->xbdev, XenbusStateConnected);
2042
	/* Now safe for us to use the shared ring */
2044 info->connected = BLKIF_STATE_CONNECTED;
2045
2046 for_each_rinfo(info, rinfo, r_index) {
2047
2048 kick_pending_request_queues(rinfo);
2049 }
2050
2051 list_for_each_entry_safe(req, n, &info->requests, queuelist) {
2052
2053 list_del_init(&req->queuelist);
2054 BUG_ON(req->nr_phys_segments > segs);
2055 blk_mq_requeue_request(req, false);
2056 }
2057 blk_mq_start_stopped_hw_queues(info->rq, true);
2058 blk_mq_kick_requeue_list(info->rq);
2059
2060 while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
2061
2062 submit_bio(bio);
2063 }
2064
2065 return 0;
2066}

/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * going away.  We tear down our blkif structure and recreate it, but leave
 * the device-layer structures intact so that this is transparent to the rest
 * of the kernel.
 */
2074static int blkfront_resume(struct xenbus_device *dev)
2075{
2076 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2077 int err = 0;
2078 unsigned int i, j;
2079 struct blkfront_ring_info *rinfo;
2080
2081 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2082
2083 bio_list_init(&info->bio_list);
2084 INIT_LIST_HEAD(&info->requests);
2085 for_each_rinfo(info, rinfo, i) {
2086 struct bio_list merge_bio;
2087 struct blk_shadow *shadow = rinfo->shadow;
2088
2089 for (j = 0; j < BLK_RING_SIZE(info); j++) {
2090
2091 if (!shadow[j].request)
2092 continue;
2093
2094
2095
2096
2097 if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
2098 req_op(shadow[j].request) == REQ_OP_DISCARD ||
2099 req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
2100 shadow[j].request->cmd_flags & REQ_FUA) {
2101
2102
2103
2104
2105
2106
2107
2108 list_add(&shadow[j].request->queuelist, &info->requests);
2109 continue;
2110 }
2111 merge_bio.head = shadow[j].request->bio;
2112 merge_bio.tail = shadow[j].request->biotail;
2113 bio_list_merge(&info->bio_list, &merge_bio);
2114 shadow[j].request->bio = NULL;
2115 blk_mq_end_request(shadow[j].request, BLK_STS_OK);
2116 }
2117 }
2118
2119 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2120
2121 err = talk_to_blkback(dev, info);
2122 if (!err)
2123 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
2124
	/*
	 * We have to wait for the backend to switch to
	 * connected state, since we want to read which
	 * features it supports.
	 */
2131 return err;
2132}
2133
2134static void blkfront_closing(struct blkfront_info *info)
2135{
2136 struct xenbus_device *xbdev = info->xbdev;
2137 struct block_device *bdev = NULL;
2138
2139 mutex_lock(&info->mutex);
2140
2141 if (xbdev->state == XenbusStateClosing) {
2142 mutex_unlock(&info->mutex);
2143 return;
2144 }
2145
2146 if (info->gd)
2147 bdev = bdget_disk(info->gd, 0);
2148
2149 mutex_unlock(&info->mutex);
2150
2151 if (!bdev) {
2152 xenbus_frontend_closed(xbdev);
2153 return;
2154 }
2155
2156 mutex_lock(&bdev->bd_mutex);
2157
2158 if (bdev->bd_openers) {
2159 xenbus_dev_error(xbdev, -EBUSY,
2160 "Device in use; refusing to close");
2161 xenbus_switch_state(xbdev, XenbusStateClosing);
2162 } else {
2163 xlvbd_release_gendisk(info);
2164 xenbus_frontend_closed(xbdev);
2165 }
2166
2167 mutex_unlock(&bdev->bd_mutex);
2168 bdput(bdev);
2169}
2170
2171static void blkfront_setup_discard(struct blkfront_info *info)
2172{
2173 int err;
2174 unsigned int discard_granularity;
2175 unsigned int discard_alignment;
2176
2177 info->feature_discard = 1;
2178 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2179 "discard-granularity", "%u", &discard_granularity,
2180 "discard-alignment", "%u", &discard_alignment,
2181 NULL);
2182 if (!err) {
2183 info->discard_granularity = discard_granularity;
2184 info->discard_alignment = discard_alignment;
2185 }
2186 info->feature_secdiscard =
2187 !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
2188 0);
2189}
2190
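/*
 * Allocate the per-ring grant buffer, scatterlists and, when indirect
 * descriptors are used without persistent grants, the pages backing the
 * indirect segments.
 */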
2191static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2192{
2193 unsigned int psegs, grants, memflags;
2194 int err, i;
2195 struct blkfront_info *info = rinfo->dev_info;
2196
2197 memflags = memalloc_noio_save();
2198
2199 if (info->max_indirect_segments == 0) {
2200 if (!HAS_EXTRA_REQ)
2201 grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2202 else {
			/*
			 * When an extra request is needed, the maximum number
			 * of grants supported is bounded by the size of a
			 * Linux block segment (one Linux page).
			 */
			grants = GRANTS_PER_PSEG;
2209 }
2210 }
2211 else
2212 grants = info->max_indirect_segments;
2213 psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
2214
2215 err = fill_grant_buffer(rinfo,
2216 (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
2217 if (err)
2218 goto out_of_memory;
2219
2220 if (!info->feature_persistent && info->max_indirect_segments) {
2221
2222
2223
2224
2225
2226 int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
2227
2228 BUG_ON(!list_empty(&rinfo->indirect_pages));
2229 for (i = 0; i < num; i++) {
2230 struct page *indirect_page = alloc_page(GFP_KERNEL);
2231 if (!indirect_page)
2232 goto out_of_memory;
2233 list_add(&indirect_page->lru, &rinfo->indirect_pages);
2234 }
2235 }
2236
2237 for (i = 0; i < BLK_RING_SIZE(info); i++) {
2238 rinfo->shadow[i].grants_used =
2239 kvcalloc(grants,
2240 sizeof(rinfo->shadow[i].grants_used[0]),
2241 GFP_KERNEL);
2242 rinfo->shadow[i].sg = kvcalloc(psegs,
2243 sizeof(rinfo->shadow[i].sg[0]),
2244 GFP_KERNEL);
2245 if (info->max_indirect_segments)
2246 rinfo->shadow[i].indirect_grants =
2247 kvcalloc(INDIRECT_GREFS(grants),
2248 sizeof(rinfo->shadow[i].indirect_grants[0]),
2249 GFP_KERNEL);
2250 if ((rinfo->shadow[i].grants_used == NULL) ||
2251 (rinfo->shadow[i].sg == NULL) ||
2252 (info->max_indirect_segments &&
2253 (rinfo->shadow[i].indirect_grants == NULL)))
2254 goto out_of_memory;
2255 sg_init_table(rinfo->shadow[i].sg, psegs);
2256 }
2257
2258 memalloc_noio_restore(memflags);
2259
2260 return 0;
2261
2262out_of_memory:
2263 for (i = 0; i < BLK_RING_SIZE(info); i++) {
2264 kvfree(rinfo->shadow[i].grants_used);
2265 rinfo->shadow[i].grants_used = NULL;
2266 kvfree(rinfo->shadow[i].sg);
2267 rinfo->shadow[i].sg = NULL;
2268 kvfree(rinfo->shadow[i].indirect_grants);
2269 rinfo->shadow[i].indirect_grants = NULL;
2270 }
2271 if (!list_empty(&rinfo->indirect_pages)) {
2272 struct page *indirect_page, *n;
2273 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
2274 list_del(&indirect_page->lru);
2275 __free_page(indirect_page);
2276 }
2277 }
2278
2279 memalloc_noio_restore(memflags);
2280
2281 return -ENOMEM;
2282}
2283
/*
 * Gather all backend feature-* entries advertised in xenstore.
 */
static void blkfront_gather_backend_features(struct blkfront_info *info)
{
	unsigned int indirect_segments;

	info->feature_flush = 0;
	info->feature_fua = 0;

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
		info->feature_flush = 1;
		info->feature_fua = 1;
	}

	/*
	 * And if there is "feature-flush-cache" use that above
	 * barriers.
	 */
	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
				 0)) {
		info->feature_flush = 1;
		info->feature_fua = 0;
	}

	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
		blkfront_setup_discard(info);

	info->feature_persistent =
		!!xenbus_read_unsigned(info->xbdev->otherend,
				       "feature-persistent", 0);

	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
					"feature-max-indirect-segments", 0);
	if (indirect_segments > xen_blkif_max_segments)
		indirect_segments = xen_blkif_max_segments;
	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
		indirect_segments = 0;
	info->max_indirect_segments = indirect_segments;

	if (info->feature_persistent) {
		mutex_lock(&blkfront_mutex);
		schedule_delayed_work(&blkfront_work, HZ * 10);
		mutex_unlock(&blkfront_mutex);
	}
}

/*
 * Invoked when the backend is finally 'ready' (and has announced the
 * details of the physical device - #sectors, sector size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int physical_sector_size;
	unsigned int binfo;
	int err, i;
	struct blkfront_ring_info *rinfo;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity_revalidate_and_notify(info->gd, sectors, true);

		return;
	case BLKIF_STATE_SUSPENDED:
		/*
		 * If we are recovering from suspension, we need to wait
		 * for the backend to announce its features before
		 * reconnecting; at the least we need to know if the
		 * backend supports indirect descriptors, and how many.
		 */
		blkif_recover(info);
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	/*
	 * physical-sector-size is a newer field, so old backends may not
	 * provide this. Assume physical sector size to be the same as
	 * sector_size in that case.
	 */
	physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
						    "physical-sector-size",
						    sector_size);
	blkfront_gather_backend_features(info);
	for_each_rinfo(info, rinfo, i) {
		err = blkfront_setup_indirect(rinfo);
		if (err) {
			xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
					 info->xbdev->otherend);
			blkif_free(info, 0);
			break;
		}
	}

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
				  physical_sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		goto fail;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	info->connected = BLKIF_STATE_CONNECTED;
	for_each_rinfo(info, rinfo, i)
		kick_pending_request_queues(rinfo);

	device_add_disk(&info->xbdev->dev, info->gd, NULL);

	info->is_ready = 1;
	return;

fail:
	blkif_free(info, 0);
	return;
}

/*
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (talk_to_blkback(dev, info))
			break;
		break;
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateConnected:
		/*
		 * talk_to_blkback sets the frontend state to
		 * XenbusStateInitialised and blkfront_connect sets it to
		 * XenbusStateConnected (if the connection went OK).
		 *
		 * If the frontend is in neither of those states when the
		 * backend (re-)announces Connected, the shared rings have
		 * not been negotiated yet, so set them up via
		 * talk_to_blkback before connecting.
		 */
		if ((dev->state != XenbusStateInitialised) &&
		    (dev->state != XenbusStateConnected)) {
			if (talk_to_blkback(dev, info))
				break;
		}

		blkfront_connect(info);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;
	case XenbusStateClosing:
		if (info)
			blkfront_closing(info);
		break;
	}
}

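/*
 * Tear down the frontend when the xenbus device goes away.  The ring and
 * grant state is freed immediately; the gendisk and blkfront_info are only
 * released here if nobody still holds the block device open, otherwise
 * blkif_release() finishes the job on last close.
 */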
static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	if (!info)
		return 0;

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		mutex_lock(&blkfront_mutex);
		free_info(info);
		mutex_unlock(&blkfront_mutex);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */
	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		mutex_lock(&blkfront_mutex);
		free_info(info);
		mutex_unlock(&blkfront_mutex);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}

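/*
 * Tell xenbus whether this frontend has finished connecting: the disk must
 * have been set up and the xenbus device must still be present.
 */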
static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}

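/*
 * Block device open method: fail with -ERESTARTSYS if the backing xenbus
 * device or the gendisk has already gone away.
 */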
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	mutex_unlock(&blkfront_mutex);
	return err;
}

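/*
 * Block device release method: on last close, finish a backend close
 * request that was deferred while the device was open, or free everything
 * if the xenbus device was hot-unplugged in the meantime.
 */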
static void blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);

	if (!bdev) {
		WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
		goto out_mutex;
	}
	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */
	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		free_info(info);
	}

out:
	bdput(bdev);
out_mutex:
	mutex_unlock(&blkfront_mutex);
}

static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
	.compat_ioctl = blkdev_compat_ptr_ioctl,
};


static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront_driver = {
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};

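/*
 * Walk every ring's list of persistent grants and end foreign access on
 * any grant the backend is no longer using, returning it to the free list
 * with an invalid reference so it can be re-granted later.
 */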
static void purge_persistent_grants(struct blkfront_info *info)
{
	unsigned int i;
	unsigned long flags;
	struct blkfront_ring_info *rinfo;

	for_each_rinfo(info, rinfo, i) {
		struct grant *gnt_list_entry, *tmp;

		spin_lock_irqsave(&rinfo->ring_lock, flags);

		if (rinfo->persistent_gnts_c == 0) {
			spin_unlock_irqrestore(&rinfo->ring_lock, flags);
			continue;
		}

		list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
					 node) {
			if (gnt_list_entry->gref == GRANT_INVALID_REF ||
			    gnttab_query_foreign_access(gnt_list_entry->gref))
				continue;

			list_del(&gnt_list_entry->node);
			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
			rinfo->persistent_gnts_c--;
			gnt_list_entry->gref = GRANT_INVALID_REF;
			list_add_tail(&gnt_list_entry->node, &rinfo->grants);
		}

		spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	}
}

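/*
 * Periodic work item: purge unused persistent grants on every device that
 * negotiated persistent grants, and re-arm itself as long as at least one
 * such device exists.
 */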
static void blkfront_delay_work(struct work_struct *work)
{
	struct blkfront_info *info;
	bool need_schedule_work = false;

	mutex_lock(&blkfront_mutex);

	list_for_each_entry(info, &info_list, info_list) {
		if (info->feature_persistent) {
			need_schedule_work = true;
			mutex_lock(&info->mutex);
			purge_persistent_grants(info);
			mutex_unlock(&info->mutex);
		}
	}

	if (need_schedule_work)
		schedule_delayed_work(&blkfront_work, HZ * 10);

	mutex_unlock(&blkfront_mutex);
}

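/*
 * Module init: register the block major, clamp the module parameters to
 * sane values, and register the xenbus frontend driver.  Bails out unless
 * the kernel is running as a Xen domain with PV disk devices.
 */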
static int __init xlblk_init(void)
{
	int ret;
	int nr_cpus = num_online_cpus();

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_disk_devices())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		pr_warn("xen_blk: can't get major %d with name %s\n",
			XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	if (xen_blkif_max_queues > nr_cpus) {
		pr_info("Invalid max_queues (%d), will use default max: %d.\n",
			xen_blkif_max_queues, nr_cpus);
		xen_blkif_max_queues = nr_cpus;
	}

	INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);

static void __exit xlblk_exit(void)
{
	cancel_delayed_work_sync(&blkfront_work);

	xenbus_unregister_driver(&blkfront_driver);
	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
	kfree(minors);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");