#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/sched/mm.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

/*
 * When the kernel page size is larger than the Xen page size, a single
 * block-layer segment can need more grants than fit in one blkif request;
 * in that case a second, chained request carries the remaining grants
 * (see blkif_setup_extra_req()).
 */
#define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
	BLKIF_STATE_ERROR,
};

struct grant {
	grant_ref_t gref;
	struct page *page;
	struct list_head node;
};

enum blk_req_status {
	REQ_PROCESSING,
	REQ_WAITING,
	REQ_DONE,
	REQ_ERROR,
	REQ_EOPNOTSUPP,
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
	unsigned int num_sg;
	enum blk_req_status status;

	#define NO_ASSOCIATED_ID ~0UL
	/* Id of the sibling shadow entry if the request had to be split in two. */
	unsigned long associated_id;
};

struct blkif_req {
	blk_status_t error;
};

static inline struct blkif_req *blkif_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
static struct delayed_work blkfront_work;
static LIST_HEAD(info_list);

/*
 * Maximum number of segments in indirect requests; the value actually used
 * is the minimum of this module parameter and the value advertised by the
 * backend.
 */
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
MODULE_PARM_DESC(max_indirect_segments,
		 "Maximum number of segments in indirect requests (default is 32)");

static unsigned int xen_blkif_max_queues = 4;
module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");

/* Maximum order of pages to be used for the shared ring. */
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

#define BLK_RING_SIZE(info)	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)

/* Big enough for "ring-ref%u" with a 32-bit index. */
#define RINGREF_NAME_LEN (20)
/* Big enough for "queue-%u" with a 32-bit index. */
#define QUEUE_NAME_LEN (17)

/*
 * Per-ring info.
 * A blkfront device uses one of these for every ring/hardware queue it
 * negotiates with the backend.
 */
struct blkfront_ring_info {
	/* Lock to protect data in every ring buffer. */
	spinlock_t ring_lock;
	struct blkif_front_ring ring;
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	unsigned int evtchn, irq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct list_head indirect_pages;
	struct list_head grants;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	struct blkfront_info *dev_info;
	struct blk_shadow shadow[];
};

/*
 * There is one of these per virtual block device; it hangs off the gendisk
 * via private_data.
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	u16 sector_size;
	unsigned int physical_sector_size;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	/* Number of pages per ring buffer. */
	unsigned int nr_ring_pages;
	struct request_queue *rq;
	unsigned int feature_flush:1;
	unsigned int feature_fua:1;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int feature_persistent:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	/* Maximum number of segments per indirect request. */
	unsigned int max_indirect_segments;
	int is_ready;
	struct blk_mq_tag_set tag_set;
	struct blkfront_ring_info *rinfo;
	unsigned int nr_rings;
	unsigned int rinfo_size;
	/* Requests and bios saved across a suspend/resume cycle. */
	struct list_head requests;
	struct bio_list bio_list;
	struct list_head info_list;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define GRANT_INVALID_REF 0

#define PARTS_PER_DISK 16
#define PARTS_PER_EXT_DISK 256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME "xvd"

/* Number of grants needed for one block-layer segment (one page). */
#define GRANTS_PER_PSEG (PAGE_SIZE / XEN_PAGE_SIZE)

/* Number of segment entries that fit in one indirect grant page. */
#define GRANTS_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))

/* Number of indirect grant pages needed to describe "_grants" grants. */
#define INDIRECT_GREFS(_grants) \
	DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)

static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);

#define for_each_rinfo(info, ptr, idx)		\
	for ((ptr) = (info)->rinfo, (idx) = 0;	\
	     (idx) < (info)->nr_rings;		\
	     (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
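
/*
 * Return the i'th per-ring structure. Entries are rinfo_size bytes apart
 * because each blkfront_ring_info ends in a flexible shadow[] array.
 */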
272static inline struct blkfront_ring_info *
273get_rinfo(const struct blkfront_info *info, unsigned int i)
274{
275 BUG_ON(i >= info->nr_rings);
276 return (void *)info->rinfo + i * info->rinfo_size;
277}
278
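/*
 * Pop a free shadow slot id. The free list is threaded through the
 * otherwise unused req.u.rw.id fields of the shadow entries.
 */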
279static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
280{
281 unsigned long free = rinfo->shadow_free;
282
283 BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
284 rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
285 rinfo->shadow[free].req.u.rw.id = 0x0fffffee;
286 return free;
287}
288
289static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
290 unsigned long id)
291{
292 if (rinfo->shadow[id].req.u.rw.id != id)
293 return -EINVAL;
294 if (rinfo->shadow[id].request == NULL)
295 return -EINVAL;
296 rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
297 rinfo->shadow[id].request = NULL;
298 rinfo->shadow_free = id;
299 return 0;
300}
301
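/*
 * Pre-populate the per-ring grant list with "num" entries; with persistent
 * grants enabled the backing pages are allocated up front as well.
 */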
302static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
303{
304 struct blkfront_info *info = rinfo->dev_info;
305 struct page *granted_page;
306 struct grant *gnt_list_entry, *n;
307 int i = 0;
308
309 while (i < num) {
310 gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
311 if (!gnt_list_entry)
312 goto out_of_memory;
313
314 if (info->feature_persistent) {
315 granted_page = alloc_page(GFP_NOIO);
316 if (!granted_page) {
317 kfree(gnt_list_entry);
318 goto out_of_memory;
319 }
320 gnt_list_entry->page = granted_page;
321 }
322
323 gnt_list_entry->gref = GRANT_INVALID_REF;
324 list_add(&gnt_list_entry->node, &rinfo->grants);
325 i++;
326 }
327
328 return 0;
329
330out_of_memory:
331 list_for_each_entry_safe(gnt_list_entry, n,
332 &rinfo->grants, node) {
333 list_del(&gnt_list_entry->node);
334 if (info->feature_persistent)
335 __free_page(gnt_list_entry->page);
336 kfree(gnt_list_entry);
337 i--;
338 }
339 BUG_ON(i != 0);
340 return -ENOMEM;
341}
342
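/*
 * Take the first entry off the per-ring grant list. If it still carries a
 * valid reference it is a cached persistent grant being reused.
 */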
343static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
344{
345 struct grant *gnt_list_entry;
346
347 BUG_ON(list_empty(&rinfo->grants));
348 gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
349 node);
350 list_del(&gnt_list_entry->node);
351
352 if (gnt_list_entry->gref != GRANT_INVALID_REF)
353 rinfo->persistent_gnts_c--;
354
355 return gnt_list_entry;
356}
357
358static inline void grant_foreign_access(const struct grant *gnt_list_entry,
359 const struct blkfront_info *info)
360{
361 gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
362 info->xbdev->otherend_id,
363 gnt_list_entry->page,
364 0);
365}
366
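/*
 * Get a grant for a data page: reuse a persistent grant when available,
 * otherwise claim a new reference and grant the given gfn to the backend.
 */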
367static struct grant *get_grant(grant_ref_t *gref_head,
368 unsigned long gfn,
369 struct blkfront_ring_info *rinfo)
370{
371 struct grant *gnt_list_entry = get_free_grant(rinfo);
372 struct blkfront_info *info = rinfo->dev_info;
373
374 if (gnt_list_entry->gref != GRANT_INVALID_REF)
375 return gnt_list_entry;
376
377
378 gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
379 BUG_ON(gnt_list_entry->gref == -ENOSPC);
380 if (info->feature_persistent)
381 grant_foreign_access(gnt_list_entry, info);
382 else {
383
384 gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
385 info->xbdev->otherend_id,
386 gfn, 0);
387 }
388
389 return gnt_list_entry;
390}
391
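/*
 * Get a grant for an indirect descriptor page. Without persistent grants
 * the page itself is taken from the per-ring indirect_pages pool.
 */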
392static struct grant *get_indirect_grant(grant_ref_t *gref_head,
393 struct blkfront_ring_info *rinfo)
394{
395 struct grant *gnt_list_entry = get_free_grant(rinfo);
396 struct blkfront_info *info = rinfo->dev_info;
397
398 if (gnt_list_entry->gref != GRANT_INVALID_REF)
399 return gnt_list_entry;
400
401
402 gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
403 BUG_ON(gnt_list_entry->gref == -ENOSPC);
404 if (!info->feature_persistent) {
405 struct page *indirect_page;
406
407
408 BUG_ON(list_empty(&rinfo->indirect_pages));
409 indirect_page = list_first_entry(&rinfo->indirect_pages,
410 struct page, lru);
411 list_del(&indirect_page->lru);
412 gnt_list_entry->page = indirect_page;
413 }
414 grant_foreign_access(gnt_list_entry, info);
415
416 return gnt_list_entry;
417}
418
419static const char *op_name(int op)
420{
421 static const char *const names[] = {
422 [BLKIF_OP_READ] = "read",
423 [BLKIF_OP_WRITE] = "write",
424 [BLKIF_OP_WRITE_BARRIER] = "barrier",
425 [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
426 [BLKIF_OP_DISCARD] = "discard" };
427
428 if (op < 0 || op >= ARRAY_SIZE(names))
429 return "unknown";
430
431 if (!names[op])
432 return "reserved";
433
434 return names[op];
435}
436static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
437{
438 unsigned int end = minor + nr;
439 int rc;
440
441 if (end > nr_minors) {
442 unsigned long *bitmap, *old;
443
444 bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
445 GFP_KERNEL);
446 if (bitmap == NULL)
447 return -ENOMEM;
448
449 spin_lock(&minor_lock);
450 if (end > nr_minors) {
451 old = minors;
452 memcpy(bitmap, minors,
453 BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
454 minors = bitmap;
455 nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
456 } else
457 old = bitmap;
458 spin_unlock(&minor_lock);
459 kfree(old);
460 }
461
462 spin_lock(&minor_lock);
463 if (find_next_bit(minors, end, minor) >= end) {
464 bitmap_set(minors, minor, nr);
465 rc = 0;
466 } else
467 rc = -EBUSY;
468 spin_unlock(&minor_lock);
469
470 return rc;
471}
472
473static void xlbd_release_minors(unsigned int minor, unsigned int nr)
474{
475 unsigned int end = minor + nr;
476
477 BUG_ON(end > nr_minors);
478 spin_lock(&minor_lock);
479 bitmap_clear(minors, minor, nr);
480 spin_unlock(&minor_lock);
481}
482
483static void blkif_restart_queue_callback(void *arg)
484{
485 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
486 schedule_work(&rinfo->work);
487}
488
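/*
 * No real geometry is available: report fixed heads/sectors and derive the
 * cylinder count from the capacity so the numbers stay consistent.
 */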
489static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
490{
491
492
493 sector_t nsect = get_capacity(bd->bd_disk);
494 sector_t cylinders = nsect;
495
496 hg->heads = 0xff;
497 hg->sectors = 0x3f;
498 sector_div(cylinders, hg->heads * hg->sectors);
499 hg->cylinders = cylinders;
500 if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
501 hg->cylinders = 0xffff;
502 return 0;
503}
504
505static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
506 unsigned command, unsigned long argument)
507{
508 int i;
509
510 switch (command) {
511 case CDROMMULTISESSION:
512 for (i = 0; i < sizeof(struct cdrom_multisession); i++)
513 if (put_user(0, (char __user *)(argument + i)))
514 return -EFAULT;
515 return 0;
516 case CDROM_GET_CAPABILITY:
517 if (bdev->bd_disk->flags & GENHD_FL_CD)
518 return 0;
519 return -EINVAL;
520 default:
521 return -EINVAL;
522 }
523}
524
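/*
 * Reserve the next slot on the shared ring plus a shadow entry to track it.
 * The caller builds the request in the shadow copy and writes it to the
 * returned ring slot only once it is complete.
 */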
525static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
526 struct request *req,
527 struct blkif_request **ring_req)
528{
529 unsigned long id;
530
531 *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
532 rinfo->ring.req_prod_pvt++;
533
534 id = get_id_from_freelist(rinfo);
535 rinfo->shadow[id].request = req;
536 rinfo->shadow[id].status = REQ_PROCESSING;
537 rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
538
539 rinfo->shadow[id].req.u.rw.id = id;
540
541 return id;
542}
543
544static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
545{
546 struct blkfront_info *info = rinfo->dev_info;
547 struct blkif_request *ring_req, *final_ring_req;
548 unsigned long id;
549
550
551 id = blkif_ring_get_request(rinfo, req, &final_ring_req);
552 ring_req = &rinfo->shadow[id].req;
553
554 ring_req->operation = BLKIF_OP_DISCARD;
555 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
556 ring_req->u.discard.id = id;
557 ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
558 if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
559 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
560 else
561 ring_req->u.discard.flag = 0;
562
563
564 *final_ring_req = *ring_req;
565 rinfo->shadow[id].status = REQ_WAITING;
566
567 return 0;
568}
569
570struct setup_rw_req {
571 unsigned int grant_idx;
572 struct blkif_request_segment *segments;
573 struct blkfront_ring_info *rinfo;
574 struct blkif_request *ring_req;
575 grant_ref_t gref_head;
576 unsigned int id;
577
578 bool need_copy;
579 unsigned int bvec_off;
580 char *bvec_data;
581
582 bool require_extra_req;
583 struct blkif_request *extra_ring_req;
584};
585
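/*
 * Per-grant callback used while building a read/write request: obtain a
 * grant for each chunk, fill in the (possibly indirect) segment entry and,
 * for writes using persistent grants, copy the data into the shared page.
 */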
586static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
587 unsigned int len, void *data)
588{
589 struct setup_rw_req *setup = data;
590 int n, ref;
591 struct grant *gnt_list_entry;
592 unsigned int fsect, lsect;
593
594 unsigned int grant_idx = setup->grant_idx;
595 struct blkif_request *ring_req = setup->ring_req;
596 struct blkfront_ring_info *rinfo = setup->rinfo;
597
598
599
600
601
602
603 struct blk_shadow *shadow = &rinfo->shadow[setup->id];
604
605 if (unlikely(setup->require_extra_req &&
606 grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
607
608
609
610
611 grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
612 ring_req = setup->extra_ring_req;
613 }
614
615 if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
616 (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
617 if (setup->segments)
618 kunmap_atomic(setup->segments);
619
620 n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
621 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
622 shadow->indirect_grants[n] = gnt_list_entry;
623 setup->segments = kmap_atomic(gnt_list_entry->page);
624 ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
625 }
626
627 gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
628 ref = gnt_list_entry->gref;
629
630
631
632
633 shadow->grants_used[setup->grant_idx] = gnt_list_entry;
634
635 if (setup->need_copy) {
636 void *shared_data;
637
638 shared_data = kmap_atomic(gnt_list_entry->page);
639
640
641
642
643
644
645
646
647
648 memcpy(shared_data + offset,
649 setup->bvec_data + setup->bvec_off,
650 len);
651
652 kunmap_atomic(shared_data);
653 setup->bvec_off += len;
654 }
655
656 fsect = offset >> 9;
657 lsect = fsect + (len >> 9) - 1;
658 if (ring_req->operation != BLKIF_OP_INDIRECT) {
659 ring_req->u.rw.seg[grant_idx] =
660 (struct blkif_request_segment) {
661 .gref = ref,
662 .first_sect = fsect,
663 .last_sect = lsect };
664 } else {
665 setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
666 (struct blkif_request_segment) {
667 .gref = ref,
668 .first_sect = fsect,
669 .last_sect = lsect };
670 }
671
672 (setup->grant_idx)++;
673}
674
675static void blkif_setup_extra_req(struct blkif_request *first,
676 struct blkif_request *second)
677{
678 uint16_t nr_segments = first->u.rw.nr_segments;
679
680
681
682
683
684 first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
685
686 second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
687 second->u.rw.sector_number = first->u.rw.sector_number +
688 (BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
689
690 second->u.rw.handle = first->u.rw.handle;
691 second->operation = first->operation;
692}
693
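/*
 * Build and queue a read/write request, switching to indirect descriptors
 * or to an extra chained request when the grant count requires it.
 */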
694static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
695{
696 struct blkfront_info *info = rinfo->dev_info;
697 struct blkif_request *ring_req, *extra_ring_req = NULL;
698 struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
699 unsigned long id, extra_id = NO_ASSOCIATED_ID;
700 bool require_extra_req = false;
701 int i;
702 struct setup_rw_req setup = {
703 .grant_idx = 0,
704 .segments = NULL,
705 .rinfo = rinfo,
706 .need_copy = rq_data_dir(req) && info->feature_persistent,
707 };
708
709
710
711
712
713
714 bool new_persistent_gnts = false;
715 struct scatterlist *sg;
716 int num_sg, max_grefs, num_grant;
717
718 max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
719 if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
720
721
722
723
724 max_grefs += INDIRECT_GREFS(max_grefs);
725
726
727 if (rinfo->persistent_gnts_c < max_grefs) {
728 new_persistent_gnts = true;
729
730 if (gnttab_alloc_grant_references(
731 max_grefs - rinfo->persistent_gnts_c,
732 &setup.gref_head) < 0) {
733 gnttab_request_free_callback(
734 &rinfo->callback,
735 blkif_restart_queue_callback,
736 rinfo,
737 max_grefs - rinfo->persistent_gnts_c);
738 return 1;
739 }
740 }
741
742
743 id = blkif_ring_get_request(rinfo, req, &final_ring_req);
744 ring_req = &rinfo->shadow[id].req;
745
746 num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
747 num_grant = 0;
748
749 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
750 num_grant += gnttab_count_grant(sg->offset, sg->length);
751
752 require_extra_req = info->max_indirect_segments == 0 &&
753 num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
754 BUG_ON(!HAS_EXTRA_REQ && require_extra_req);
755
756 rinfo->shadow[id].num_sg = num_sg;
757 if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
758 likely(!require_extra_req)) {
759
760
761
762
763 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
764 ring_req->operation = BLKIF_OP_INDIRECT;
765 ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
766 BLKIF_OP_WRITE : BLKIF_OP_READ;
767 ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
768 ring_req->u.indirect.handle = info->handle;
769 ring_req->u.indirect.nr_segments = num_grant;
770 } else {
771 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
772 ring_req->u.rw.handle = info->handle;
773 ring_req->operation = rq_data_dir(req) ?
774 BLKIF_OP_WRITE : BLKIF_OP_READ;
775 if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
776
777
778
779
780
781
782
783 if (info->feature_flush && info->feature_fua)
784 ring_req->operation =
785 BLKIF_OP_WRITE_BARRIER;
786 else if (info->feature_flush)
787 ring_req->operation =
788 BLKIF_OP_FLUSH_DISKCACHE;
789 else
790 ring_req->operation = 0;
791 }
792 ring_req->u.rw.nr_segments = num_grant;
793 if (unlikely(require_extra_req)) {
794 extra_id = blkif_ring_get_request(rinfo, req,
795 &final_extra_ring_req);
796 extra_ring_req = &rinfo->shadow[extra_id].req;
797
798
799
800
801
802 rinfo->shadow[extra_id].num_sg = 0;
803
804 blkif_setup_extra_req(ring_req, extra_ring_req);
805
806
807 rinfo->shadow[extra_id].associated_id = id;
808 rinfo->shadow[id].associated_id = extra_id;
809 }
810 }
811
812 setup.ring_req = ring_req;
813 setup.id = id;
814
815 setup.require_extra_req = require_extra_req;
816 if (unlikely(require_extra_req))
817 setup.extra_ring_req = extra_ring_req;
818
819 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
820 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
821
822 if (setup.need_copy) {
823 setup.bvec_off = sg->offset;
824 setup.bvec_data = kmap_atomic(sg_page(sg));
825 }
826
827 gnttab_foreach_grant_in_range(sg_page(sg),
828 sg->offset,
829 sg->length,
830 blkif_setup_rw_req_grant,
831 &setup);
832
833 if (setup.need_copy)
834 kunmap_atomic(setup.bvec_data);
835 }
836 if (setup.segments)
837 kunmap_atomic(setup.segments);
838
839
840 *final_ring_req = *ring_req;
841 rinfo->shadow[id].status = REQ_WAITING;
842 if (unlikely(require_extra_req)) {
843 *final_extra_ring_req = *extra_ring_req;
844 rinfo->shadow[extra_id].status = REQ_WAITING;
845 }
846
847 if (new_persistent_gnts)
848 gnttab_free_grant_references(setup.gref_head);
849
850 return 0;
851}
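
/*
 * Generate a Xen blkfront IO request from a blk layer request and queue it
 * on the ring. Returns nonzero if the request cannot be queued right now.
 */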
859static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
860{
861 if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
862 return 1;
863
864 if (unlikely(req_op(req) == REQ_OP_DISCARD ||
865 req_op(req) == REQ_OP_SECURE_ERASE))
866 return blkif_queue_discard_req(req, rinfo);
867 else
868 return blkif_queue_rw_req(req, rinfo);
869}
870
871static inline void flush_requests(struct blkfront_ring_info *rinfo)
872{
873 int notify;
874
875 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
876
877 if (notify)
878 notify_remote_via_irq(rinfo->irq);
879}
880
881static inline bool blkif_request_flush_invalid(struct request *req,
882 struct blkfront_info *info)
883{
884 return (blk_rq_is_passthrough(req) ||
885 ((req_op(req) == REQ_OP_FLUSH) &&
886 !info->feature_flush) ||
887 ((req->cmd_flags & REQ_FUA) &&
888 !info->feature_fua));
889}
890
891static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
892 const struct blk_mq_queue_data *qd)
893{
894 unsigned long flags;
895 int qid = hctx->queue_num;
896 struct blkfront_info *info = hctx->queue->queuedata;
897 struct blkfront_ring_info *rinfo = NULL;
898
899 rinfo = get_rinfo(info, qid);
900 blk_mq_start_request(qd->rq);
901 spin_lock_irqsave(&rinfo->ring_lock, flags);
902 if (RING_FULL(&rinfo->ring))
903 goto out_busy;
904
905 if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
906 goto out_err;
907
908 if (blkif_queue_request(qd->rq, rinfo))
909 goto out_busy;
910
911 flush_requests(rinfo);
912 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
913 return BLK_STS_OK;
914
915out_err:
916 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
917 return BLK_STS_IOERR;
918
919out_busy:
920 blk_mq_stop_hw_queue(hctx);
921 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
922 return BLK_STS_DEV_RESOURCE;
923}
924
925static void blkif_complete_rq(struct request *rq)
926{
927 blk_mq_end_request(rq, blkif_req(rq)->error);
928}
929
930static const struct blk_mq_ops blkfront_mq_ops = {
931 .queue_rq = blkif_queue_rq,
932 .complete = blkif_complete_rq,
933};
934
935static void blkif_set_queue_limits(struct blkfront_info *info)
936{
937 struct request_queue *rq = info->rq;
938 struct gendisk *gd = info->gd;
939 unsigned int segments = info->max_indirect_segments ? :
940 BLKIF_MAX_SEGMENTS_PER_REQUEST;
941
942 blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
943
944 if (info->feature_discard) {
945 blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
946 blk_queue_max_discard_sectors(rq, get_capacity(gd));
947 rq->limits.discard_granularity = info->discard_granularity ?:
948 info->physical_sector_size;
949 rq->limits.discard_alignment = info->discard_alignment;
950 if (info->feature_secdiscard)
951 blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
952 }
953
954
955 blk_queue_logical_block_size(rq, info->sector_size);
956 blk_queue_physical_block_size(rq, info->physical_sector_size);
957 blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
958
959
960 blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
961 blk_queue_max_segment_size(rq, PAGE_SIZE);
962
963
964 blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
965
966
967 blk_queue_dma_alignment(rq, 511);
968}
969
970static const char *flush_info(struct blkfront_info *info)
971{
972 if (info->feature_flush && info->feature_fua)
973 return "barrier: enabled;";
974 else if (info->feature_flush)
975 return "flush diskcache: enabled;";
976 else
977 return "barrier or flush: disabled;";
978}
979
980static void xlvbd_flush(struct blkfront_info *info)
981{
982 blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
983 info->feature_fua ? true : false);
984 pr_info("blkfront: %s: %s %s %s %s %s\n",
985 info->gd->disk_name, flush_info(info),
986 "persistent grants:", info->feature_persistent ?
987 "enabled;" : "disabled;", "indirect descriptors:",
988 info->max_indirect_segments ? "enabled;" : "disabled;");
989}
990
991static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
992{
993 int major;
994 major = BLKIF_MAJOR(vdevice);
995 *minor = BLKIF_MINOR(vdevice);
996 switch (major) {
997 case XEN_IDE0_MAJOR:
998 *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
999 *minor = ((*minor / 64) * PARTS_PER_DISK) +
1000 EMULATED_HD_DISK_MINOR_OFFSET;
1001 break;
1002 case XEN_IDE1_MAJOR:
1003 *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
1004 *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
1005 EMULATED_HD_DISK_MINOR_OFFSET;
1006 break;
1007 case XEN_SCSI_DISK0_MAJOR:
1008 *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
1009 *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
1010 break;
1011 case XEN_SCSI_DISK1_MAJOR:
1012 case XEN_SCSI_DISK2_MAJOR:
1013 case XEN_SCSI_DISK3_MAJOR:
1014 case XEN_SCSI_DISK4_MAJOR:
1015 case XEN_SCSI_DISK5_MAJOR:
1016 case XEN_SCSI_DISK6_MAJOR:
1017 case XEN_SCSI_DISK7_MAJOR:
1018 *offset = (*minor / PARTS_PER_DISK) +
1019 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
1020 EMULATED_SD_DISK_NAME_OFFSET;
1021 *minor = *minor +
1022 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
1023 EMULATED_SD_DISK_MINOR_OFFSET;
1024 break;
1025 case XEN_SCSI_DISK8_MAJOR:
1026 case XEN_SCSI_DISK9_MAJOR:
1027 case XEN_SCSI_DISK10_MAJOR:
1028 case XEN_SCSI_DISK11_MAJOR:
1029 case XEN_SCSI_DISK12_MAJOR:
1030 case XEN_SCSI_DISK13_MAJOR:
1031 case XEN_SCSI_DISK14_MAJOR:
1032 case XEN_SCSI_DISK15_MAJOR:
1033 *offset = (*minor / PARTS_PER_DISK) +
1034 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
1035 EMULATED_SD_DISK_NAME_OFFSET;
1036 *minor = *minor +
1037 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
1038 EMULATED_SD_DISK_MINOR_OFFSET;
1039 break;
1040 case XENVBD_MAJOR:
1041 *offset = *minor / PARTS_PER_DISK;
1042 break;
1043 default:
1044 printk(KERN_WARNING "blkfront: your disk configuration is "
1045 "incorrect, please use an xvd device instead\n");
1046 return -ENODEV;
1047 }
1048 return 0;
1049}
1050
1051static char *encode_disk_name(char *ptr, unsigned int n)
1052{
1053 if (n >= 26)
1054 ptr = encode_disk_name(ptr, n / 26 - 1);
1055 *ptr = 'a' + n % 26;
1056 return ptr + 1;
1057}
1058
1059static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
1060 struct blkfront_info *info,
1061 u16 vdisk_info, u16 sector_size,
1062 unsigned int physical_sector_size)
1063{
1064 struct gendisk *gd;
1065 int nr_minors = 1;
1066 int err;
1067 unsigned int offset;
1068 int minor;
1069 int nr_parts;
1070 char *ptr;
1071
1072 BUG_ON(info->gd != NULL);
1073 BUG_ON(info->rq != NULL);
1074
1075 if ((info->vdevice>>EXT_SHIFT) > 1) {
1076
1077 printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
1078 return -ENODEV;
1079 }
1080
1081 if (!VDEV_IS_EXTENDED(info->vdevice)) {
1082 err = xen_translate_vdev(info->vdevice, &minor, &offset);
1083 if (err)
1084 return err;
1085 nr_parts = PARTS_PER_DISK;
1086 } else {
1087 minor = BLKIF_MINOR_EXT(info->vdevice);
1088 nr_parts = PARTS_PER_EXT_DISK;
1089 offset = minor / nr_parts;
1090 if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
					"emulated IDE disks,\n\t choose an xvd device name "
					"from xvde on\n", info->vdevice);
1094 }
1095 if (minor >> MINORBITS) {
1096 pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
1097 info->vdevice, minor);
1098 return -ENODEV;
1099 }
1100
1101 if ((minor % nr_parts) == 0)
1102 nr_minors = nr_parts;
1103
1104 err = xlbd_reserve_minors(minor, nr_minors);
1105 if (err)
1106 return err;
1107
1108 memset(&info->tag_set, 0, sizeof(info->tag_set));
1109 info->tag_set.ops = &blkfront_mq_ops;
1110 info->tag_set.nr_hw_queues = info->nr_rings;
1111 if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
1112
1113
1114
1115
1116
1117
1118 info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
1119 } else
1120 info->tag_set.queue_depth = BLK_RING_SIZE(info);
1121 info->tag_set.numa_node = NUMA_NO_NODE;
1122 info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1123 info->tag_set.cmd_size = sizeof(struct blkif_req);
1124 info->tag_set.driver_data = info;
1125
1126 err = blk_mq_alloc_tag_set(&info->tag_set);
1127 if (err)
1128 goto out_release_minors;
1129
1130 gd = blk_mq_alloc_disk(&info->tag_set, info);
1131 if (IS_ERR(gd)) {
1132 err = PTR_ERR(gd);
1133 goto out_free_tag_set;
1134 }
1135
1136 strcpy(gd->disk_name, DEV_NAME);
1137 ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
1138 BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
1139 if (nr_minors > 1)
1140 *ptr = 0;
1141 else
1142 snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
1143 "%d", minor & (nr_parts - 1));
1144
1145 gd->major = XENVBD_MAJOR;
1146 gd->first_minor = minor;
1147 gd->minors = nr_minors;
1148 gd->fops = &xlvbd_block_fops;
1149 gd->private_data = info;
1150 set_capacity(gd, capacity);
1151
1152 info->rq = gd->queue;
1153 info->gd = gd;
1154 info->sector_size = sector_size;
1155 info->physical_sector_size = physical_sector_size;
1156 blkif_set_queue_limits(info);
1157
1158 xlvbd_flush(info);
1159
1160 if (vdisk_info & VDISK_READONLY)
1161 set_disk_ro(gd, 1);
1162
1163 if (vdisk_info & VDISK_REMOVABLE)
1164 gd->flags |= GENHD_FL_REMOVABLE;
1165
1166 if (vdisk_info & VDISK_CDROM)
1167 gd->flags |= GENHD_FL_CD;
1168
1169 return 0;
1170
1171out_free_tag_set:
1172 blk_mq_free_tag_set(&info->tag_set);
1173out_release_minors:
1174 xlbd_release_minors(minor, nr_minors);
1175 return err;
1176}
1177
1178
1179static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
1180{
1181 if (!RING_FULL(&rinfo->ring))
1182 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
1183}
1184
1185static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
1186{
1187 unsigned long flags;
1188
1189 spin_lock_irqsave(&rinfo->ring_lock, flags);
1190 kick_pending_request_queues_locked(rinfo);
1191 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1192}
1193
1194static void blkif_restart_queue(struct work_struct *work)
1195{
1196 struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
1197
1198 if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
1199 kick_pending_request_queues(rinfo);
1200}
1201
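/*
 * Release everything attached to one ring: cached and in-flight grants,
 * shadow state, the shared ring pages and the event channel.
 */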
1202static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1203{
1204 struct grant *persistent_gnt, *n;
1205 struct blkfront_info *info = rinfo->dev_info;
1206 int i, j, segs;
1207
1208
1209
1210
1211
1212 if (!list_empty(&rinfo->indirect_pages)) {
1213 struct page *indirect_page, *n;
1214
1215 BUG_ON(info->feature_persistent);
1216 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
1217 list_del(&indirect_page->lru);
1218 __free_page(indirect_page);
1219 }
1220 }
1221
1222
1223 if (!list_empty(&rinfo->grants)) {
1224 list_for_each_entry_safe(persistent_gnt, n,
1225 &rinfo->grants, node) {
1226 list_del(&persistent_gnt->node);
1227 if (persistent_gnt->gref != GRANT_INVALID_REF) {
1228 gnttab_end_foreign_access(persistent_gnt->gref,
1229 0, 0UL);
1230 rinfo->persistent_gnts_c--;
1231 }
1232 if (info->feature_persistent)
1233 __free_page(persistent_gnt->page);
1234 kfree(persistent_gnt);
1235 }
1236 }
1237 BUG_ON(rinfo->persistent_gnts_c != 0);
1238
1239 for (i = 0; i < BLK_RING_SIZE(info); i++) {
1240
1241
1242
1243
1244 if (!rinfo->shadow[i].request)
1245 goto free_shadow;
1246
1247 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
1248 rinfo->shadow[i].req.u.indirect.nr_segments :
1249 rinfo->shadow[i].req.u.rw.nr_segments;
1250 for (j = 0; j < segs; j++) {
1251 persistent_gnt = rinfo->shadow[i].grants_used[j];
1252 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1253 if (info->feature_persistent)
1254 __free_page(persistent_gnt->page);
1255 kfree(persistent_gnt);
1256 }
1257
1258 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
1259
1260
1261
1262
1263 goto free_shadow;
1264
1265 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
1266 persistent_gnt = rinfo->shadow[i].indirect_grants[j];
1267 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1268 __free_page(persistent_gnt->page);
1269 kfree(persistent_gnt);
1270 }
1271
1272free_shadow:
1273 kvfree(rinfo->shadow[i].grants_used);
1274 rinfo->shadow[i].grants_used = NULL;
1275 kvfree(rinfo->shadow[i].indirect_grants);
1276 rinfo->shadow[i].indirect_grants = NULL;
1277 kvfree(rinfo->shadow[i].sg);
1278 rinfo->shadow[i].sg = NULL;
1279 }
1280
1281
1282 gnttab_cancel_free_callback(&rinfo->callback);
1283
1284
1285 flush_work(&rinfo->work);
1286
1287
1288 for (i = 0; i < info->nr_ring_pages; i++) {
1289 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
1290 gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
1291 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1292 }
1293 }
1294 free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
1295 rinfo->ring.sring = NULL;
1296
1297 if (rinfo->irq)
1298 unbind_from_irqhandler(rinfo->irq, rinfo);
1299 rinfo->evtchn = rinfo->irq = 0;
1300}
1301
1302static void blkif_free(struct blkfront_info *info, int suspend)
1303{
1304 unsigned int i;
1305 struct blkfront_ring_info *rinfo;
1306
1307
1308 info->connected = suspend ?
1309 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
1310
1311 if (info->rq)
1312 blk_mq_stop_hw_queues(info->rq);
1313
1314 for_each_rinfo(info, rinfo, i)
1315 blkif_free_ring(rinfo);
1316
1317 kvfree(info->rinfo);
1318 info->rinfo = NULL;
1319 info->nr_rings = 0;
1320}
1321
1322struct copy_from_grant {
1323 const struct blk_shadow *s;
1324 unsigned int grant_idx;
1325 unsigned int bvec_offset;
1326 char *bvec_data;
1327};
1328
1329static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
1330 unsigned int len, void *data)
1331{
1332 struct copy_from_grant *info = data;
1333 char *shared_data;
1334
1335 const struct blk_shadow *s = info->s;
1336
1337 shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
1338
1339 memcpy(info->bvec_data + info->bvec_offset,
1340 shared_data + offset, len);
1341
1342 info->bvec_offset += len;
1343 info->grant_idx++;
1344
1345 kunmap_atomic(shared_data);
1346}
1347
1348static enum blk_req_status blkif_rsp_to_req_status(int rsp)
1349{
1350 switch (rsp)
1351 {
1352 case BLKIF_RSP_OKAY:
1353 return REQ_DONE;
1354 case BLKIF_RSP_EOPNOTSUPP:
1355 return REQ_EOPNOTSUPP;
1356 case BLKIF_RSP_ERROR:
1357 default:
1358 return REQ_ERROR;
1359 }
1360}
1361
1362
1363
1364
1365static int blkif_get_final_status(enum blk_req_status s1,
1366 enum blk_req_status s2)
1367{
1368 BUG_ON(s1 < REQ_DONE);
1369 BUG_ON(s2 < REQ_DONE);
1370
1371 if (s1 == REQ_ERROR || s2 == REQ_ERROR)
1372 return BLKIF_RSP_ERROR;
1373 else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
1374 return BLKIF_RSP_EOPNOTSUPP;
1375 return BLKIF_RSP_OKAY;
1376}
1377
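/*
 * Handle completion of one request: merge the status of a split request
 * with its sibling, copy read data out of persistently granted pages and
 * recycle or release the grants. Returns false while the other half of a
 * split request is still pending.
 */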
1378static bool blkif_completion(unsigned long *id,
1379 struct blkfront_ring_info *rinfo,
1380 struct blkif_response *bret)
1381{
1382 int i = 0;
1383 struct scatterlist *sg;
1384 int num_sg, num_grant;
1385 struct blkfront_info *info = rinfo->dev_info;
1386 struct blk_shadow *s = &rinfo->shadow[*id];
1387 struct copy_from_grant data = {
1388 .grant_idx = 0,
1389 };
1390
1391 num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
1392 s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
1393
1394
1395 if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
1396 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
1397
1398
1399 s->status = blkif_rsp_to_req_status(bret->status);
1400
1401
1402 if (s2->status < REQ_DONE)
1403 return false;
1404
1405 bret->status = blkif_get_final_status(s->status,
1406 s2->status);
1407
1408
1409
1410
1411
1412 num_grant += s2->req.u.rw.nr_segments;
1413
1414
1415
1416
1417
1418 if (s2->num_sg != 0) {
1419
1420 *id = s->associated_id;
1421 s = s2;
1422 }
1423
1424
1425
1426
1427
1428 if (add_id_to_freelist(rinfo, s->associated_id))
1429 WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
1430 info->gd->disk_name, s->associated_id);
1431 }
1432
1433 data.s = s;
1434 num_sg = s->num_sg;
1435
1436 if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
1437 for_each_sg(s->sg, sg, num_sg, i) {
1438 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
1439
1440 data.bvec_offset = sg->offset;
1441 data.bvec_data = kmap_atomic(sg_page(sg));
1442
1443 gnttab_foreach_grant_in_range(sg_page(sg),
1444 sg->offset,
1445 sg->length,
1446 blkif_copy_from_grant,
1447 &data);
1448
1449 kunmap_atomic(data.bvec_data);
1450 }
1451 }
1452
1453 for (i = 0; i < num_grant; i++) {
1454 if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
1455
1456
1457
1458
1459
1460
1461 if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
1464 list_add(&s->grants_used[i]->node, &rinfo->grants);
1465 rinfo->persistent_gnts_c++;
1466 } else {
1467
1468
1469
1470
1471
1472
1473 gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
1474 s->grants_used[i]->gref = GRANT_INVALID_REF;
1475 list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
1476 }
1477 }
1478 if (s->req.operation == BLKIF_OP_INDIRECT) {
1479 for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
1480 if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
1481 if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
1484 list_add(&s->indirect_grants[i]->node, &rinfo->grants);
1485 rinfo->persistent_gnts_c++;
1486 } else {
1487 struct page *indirect_page;
1488
1489 gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
1490
1491
1492
1493
1494 if (!info->feature_persistent) {
1495 indirect_page = s->indirect_grants[i]->page;
1496 list_add(&indirect_page->lru, &rinfo->indirect_pages);
1497 }
1498 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1499 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
1500 }
1501 }
1502 }
1503
1504 return true;
1505}
1506
1507static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1508{
1509 struct request *req;
1510 struct blkif_response bret;
1511 RING_IDX i, rp;
1512 unsigned long flags;
1513 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
1514 struct blkfront_info *info = rinfo->dev_info;
1515 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1516
1517 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
1518 xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
1519 return IRQ_HANDLED;
1520 }
1521
1522 spin_lock_irqsave(&rinfo->ring_lock, flags);
1523 again:
1524 rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
1525 virt_rmb();
1526 if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
1527 pr_alert("%s: illegal number of responses %u\n",
1528 info->gd->disk_name, rp - rinfo->ring.rsp_cons);
1529 goto err;
1530 }
1531
1532 for (i = rinfo->ring.rsp_cons; i != rp; i++) {
1533 unsigned long id;
1534 unsigned int op;
1535
1536 eoiflag = 0;
1537
1538 RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
1539 id = bret.id;
1540
1541
1542
1543
1544
1545
1546 if (id >= BLK_RING_SIZE(info)) {
1547 pr_alert("%s: response has incorrect id (%ld)\n",
1548 info->gd->disk_name, id);
1549 goto err;
1550 }
1551 if (rinfo->shadow[id].status != REQ_WAITING) {
1552 pr_alert("%s: response references no pending request\n",
1553 info->gd->disk_name);
1554 goto err;
1555 }
1556
1557 rinfo->shadow[id].status = REQ_PROCESSING;
1558 req = rinfo->shadow[id].request;
1559
1560 op = rinfo->shadow[id].req.operation;
1561 if (op == BLKIF_OP_INDIRECT)
1562 op = rinfo->shadow[id].req.u.indirect.indirect_op;
1563 if (bret.operation != op) {
1564 pr_alert("%s: response has wrong operation (%u instead of %u)\n",
1565 info->gd->disk_name, bret.operation, op);
1566 goto err;
1567 }
1568
1569 if (bret.operation != BLKIF_OP_DISCARD) {
1570
1571
1572
1573
1574 if (!blkif_completion(&id, rinfo, &bret))
1575 continue;
1576 }
1577
1578 if (add_id_to_freelist(rinfo, id)) {
1579 WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
1580 info->gd->disk_name, op_name(bret.operation), id);
1581 continue;
1582 }
1583
1584 if (bret.status == BLKIF_RSP_OKAY)
1585 blkif_req(req)->error = BLK_STS_OK;
1586 else
1587 blkif_req(req)->error = BLK_STS_IOERR;
1588
1589 switch (bret.operation) {
1590 case BLKIF_OP_DISCARD:
1591 if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
1592 struct request_queue *rq = info->rq;
1593
1594 pr_warn_ratelimited("blkfront: %s: %s op failed\n",
1595 info->gd->disk_name, op_name(bret.operation));
1596 blkif_req(req)->error = BLK_STS_NOTSUPP;
1597 info->feature_discard = 0;
1598 info->feature_secdiscard = 0;
1599 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
1600 blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
1601 }
1602 break;
1603 case BLKIF_OP_FLUSH_DISKCACHE:
1604 case BLKIF_OP_WRITE_BARRIER:
1605 if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
1606 pr_warn_ratelimited("blkfront: %s: %s op failed\n",
1607 info->gd->disk_name, op_name(bret.operation));
1608 blkif_req(req)->error = BLK_STS_NOTSUPP;
1609 }
1610 if (unlikely(bret.status == BLKIF_RSP_ERROR &&
1611 rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1612 pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
1613 info->gd->disk_name, op_name(bret.operation));
1614 blkif_req(req)->error = BLK_STS_NOTSUPP;
1615 }
1616 if (unlikely(blkif_req(req)->error)) {
1617 if (blkif_req(req)->error == BLK_STS_NOTSUPP)
1618 blkif_req(req)->error = BLK_STS_OK;
1619 info->feature_fua = 0;
1620 info->feature_flush = 0;
1621 xlvbd_flush(info);
1622 }
1623 fallthrough;
1624 case BLKIF_OP_READ:
1625 case BLKIF_OP_WRITE:
1626 if (unlikely(bret.status != BLKIF_RSP_OKAY))
1627 dev_dbg_ratelimited(&info->xbdev->dev,
1628 "Bad return from blkdev data request: %#x\n",
1629 bret.status);
1630
1631 break;
1632 default:
1633 BUG();
1634 }
1635
1636 if (likely(!blk_should_fake_timeout(req->q)))
1637 blk_mq_complete_request(req);
1638 }
1639
1640 rinfo->ring.rsp_cons = i;
1641
1642 if (i != rinfo->ring.req_prod_pvt) {
1643 int more_to_do;
1644 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
1645 if (more_to_do)
1646 goto again;
1647 } else
1648 rinfo->ring.sring->rsp_event = i + 1;
1649
1650 kick_pending_request_queues_locked(rinfo);
1651
1652 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1653
1654 xen_irq_lateeoi(irq, eoiflag);
1655
1656 return IRQ_HANDLED;
1657
1658 err:
1659 info->connected = BLKIF_STATE_ERROR;
1660
1661 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1662
1663
1664
1665 pr_alert("%s disabled for further use\n", info->gd->disk_name);
1666 return IRQ_HANDLED;
1667}
1668
1669
1670static int setup_blkring(struct xenbus_device *dev,
1671 struct blkfront_ring_info *rinfo)
1672{
1673 struct blkif_sring *sring;
1674 int err, i;
1675 struct blkfront_info *info = rinfo->dev_info;
1676 unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
1677 grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
1678
1679 for (i = 0; i < info->nr_ring_pages; i++)
1680 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1681
1682 sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
1683 get_order(ring_size));
1684 if (!sring) {
1685 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
1686 return -ENOMEM;
1687 }
1688 SHARED_RING_INIT(sring);
1689 FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
1690
1691 err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
1692 if (err < 0) {
1693 free_pages((unsigned long)sring, get_order(ring_size));
1694 rinfo->ring.sring = NULL;
1695 goto fail;
1696 }
1697 for (i = 0; i < info->nr_ring_pages; i++)
1698 rinfo->ring_ref[i] = gref[i];
1699
1700 err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
1701 if (err)
1702 goto fail;
1703
1704 err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
1705 0, "blkif", rinfo);
1706 if (err <= 0) {
1707 xenbus_dev_fatal(dev, err,
1708 "bind_evtchn_to_irqhandler failed");
1709 goto fail;
1710 }
1711 rinfo->irq = err;
1712
1713 return 0;
1714fail:
1715 blkif_free(info, 0);
1716 return err;
1717}
1718
1719
1720
1721
1722
1723static int write_per_ring_nodes(struct xenbus_transaction xbt,
1724 struct blkfront_ring_info *rinfo, const char *dir)
1725{
1726 int err;
1727 unsigned int i;
1728 const char *message = NULL;
1729 struct blkfront_info *info = rinfo->dev_info;
1730
1731 if (info->nr_ring_pages == 1) {
1732 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
1733 if (err) {
1734 message = "writing ring-ref";
1735 goto abort_transaction;
1736 }
1737 } else {
1738 for (i = 0; i < info->nr_ring_pages; i++) {
1739 char ring_ref_name[RINGREF_NAME_LEN];
1740
1741 snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
1742 err = xenbus_printf(xbt, dir, ring_ref_name,
1743 "%u", rinfo->ring_ref[i]);
1744 if (err) {
1745 message = "writing ring-ref";
1746 goto abort_transaction;
1747 }
1748 }
1749 }
1750
1751 err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
1752 if (err) {
1753 message = "writing event-channel";
1754 goto abort_transaction;
1755 }
1756
1757 return 0;
1758
1759abort_transaction:
1760 xenbus_transaction_end(xbt, 1);
1761 if (message)
1762 xenbus_dev_fatal(info->xbdev, err, "%s", message);
1763
1764 return err;
1765}
1766
1767
1768static int talk_to_blkback(struct xenbus_device *dev,
1769 struct blkfront_info *info)
1770{
1771 const char *message = NULL;
1772 struct xenbus_transaction xbt;
1773 int err;
1774 unsigned int i, max_page_order;
1775 unsigned int ring_page_order;
1776 struct blkfront_ring_info *rinfo;
1777
1778 if (!info)
1779 return -ENODEV;
1780
1781 max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
1782 "max-ring-page-order", 0);
1783 ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1784 info->nr_ring_pages = 1 << ring_page_order;
1785
1786 err = negotiate_mq(info);
1787 if (err)
1788 goto destroy_blkring;
1789
1790 for_each_rinfo(info, rinfo, i) {
1791
1792 err = setup_blkring(dev, rinfo);
1793 if (err)
1794 goto destroy_blkring;
1795 }
1796
1797again:
1798 err = xenbus_transaction_start(&xbt);
1799 if (err) {
1800 xenbus_dev_fatal(dev, err, "starting transaction");
1801 goto destroy_blkring;
1802 }
1803
1804 if (info->nr_ring_pages > 1) {
1805 err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
1806 ring_page_order);
1807 if (err) {
1808 message = "writing ring-page-order";
1809 goto abort_transaction;
1810 }
1811 }
1812
1813
1814 if (info->nr_rings == 1) {
1815 err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
1816 if (err)
1817 goto destroy_blkring;
1818 } else {
1819 char *path;
1820 size_t pathsize;
1821
1822 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
1823 info->nr_rings);
1824 if (err) {
1825 message = "writing multi-queue-num-queues";
1826 goto abort_transaction;
1827 }
1828
1829 pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
1830 path = kmalloc(pathsize, GFP_KERNEL);
1831 if (!path) {
1832 err = -ENOMEM;
1833 message = "ENOMEM while writing ring references";
1834 goto abort_transaction;
1835 }
1836
1837 for_each_rinfo(info, rinfo, i) {
1838 memset(path, 0, pathsize);
1839 snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
1840 err = write_per_ring_nodes(xbt, rinfo, path);
1841 if (err) {
1842 kfree(path);
1843 goto destroy_blkring;
1844 }
1845 }
1846 kfree(path);
1847 }
1848 err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
1849 XEN_IO_PROTO_ABI_NATIVE);
1850 if (err) {
1851 message = "writing protocol";
1852 goto abort_transaction;
1853 }
1854 err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
1855 info->feature_persistent);
1856 if (err)
1857 dev_warn(&dev->dev,
1858 "writing persistent grants feature to xenbus");
1859
1860 err = xenbus_transaction_end(xbt, 0);
1861 if (err) {
1862 if (err == -EAGAIN)
1863 goto again;
1864 xenbus_dev_fatal(dev, err, "completing transaction");
1865 goto destroy_blkring;
1866 }
1867
1868 for_each_rinfo(info, rinfo, i) {
1869 unsigned int j;
1870
1871 for (j = 0; j < BLK_RING_SIZE(info); j++)
1872 rinfo->shadow[j].req.u.rw.id = j + 1;
1873 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1874 }
1875 xenbus_switch_state(dev, XenbusStateInitialised);
1876
1877 return 0;
1878
1879 abort_transaction:
1880 xenbus_transaction_end(xbt, 1);
1881 if (message)
1882 xenbus_dev_fatal(dev, err, "%s", message);
1883 destroy_blkring:
1884 blkif_free(info, 0);
1885 return err;
1886}
1887
1888static int negotiate_mq(struct blkfront_info *info)
1889{
1890 unsigned int backend_max_queues;
1891 unsigned int i;
1892 struct blkfront_ring_info *rinfo;
1893
1894 BUG_ON(info->nr_rings);
1895
1896
1897 backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1898 "multi-queue-max-queues", 1);
1899 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1900
1901 if (!info->nr_rings)
1902 info->nr_rings = 1;
1903
1904 info->rinfo_size = struct_size(info->rinfo, shadow,
1905 BLK_RING_SIZE(info));
1906 info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
1907 if (!info->rinfo) {
1908 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1909 info->nr_rings = 0;
1910 return -ENOMEM;
1911 }
1912
1913 for_each_rinfo(info, rinfo, i) {
1914 INIT_LIST_HEAD(&rinfo->indirect_pages);
1915 INIT_LIST_HEAD(&rinfo->grants);
1916 rinfo->dev_info = info;
1917 INIT_WORK(&rinfo->work, blkif_restart_queue);
1918 spin_lock_init(&rinfo->ring_lock);
1919 }
1920 return 0;
1921}
1922
1923
1924static bool feature_persistent = true;
1925module_param(feature_persistent, bool, 0644);
1926MODULE_PARM_DESC(feature_persistent,
1927 "Enables the persistent grants feature");
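
/*
 * Entry point when a new device is created: read the virtual device id,
 * allocate the blkfront_info and register it; the rings themselves are set
 * up later via talk_to_blkback().
 */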
1935static int blkfront_probe(struct xenbus_device *dev,
1936 const struct xenbus_device_id *id)
1937{
1938 int err, vdevice;
1939 struct blkfront_info *info;
1940
1941
1942 err = xenbus_scanf(XBT_NIL, dev->nodename,
1943 "virtual-device", "%i", &vdevice);
1944 if (err != 1) {
1945
1946 err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
1947 "%i", &vdevice);
1948 if (err != 1) {
1949 xenbus_dev_fatal(dev, err, "reading virtual-device");
1950 return err;
1951 }
1952 }
1953
1954 if (xen_hvm_domain()) {
1955 char *type;
1956 int len;
1957
1958 if (xen_has_pv_and_legacy_disk_devices()) {
1959 int major;
1960
1961 if (!VDEV_IS_EXTENDED(vdevice))
1962 major = BLKIF_MAJOR(vdevice);
1963 else
1964 major = XENVBD_MAJOR;
1965
1966 if (major != XENVBD_MAJOR) {
1967 printk(KERN_INFO
1968 "%s: HVM does not support vbd %d as xen block device\n",
1969 __func__, vdevice);
1970 return -ENODEV;
1971 }
1972 }
1973
1974 type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
1975 if (IS_ERR(type))
1976 return -ENODEV;
1977 if (strncmp(type, "cdrom", 5) == 0) {
1978 kfree(type);
1979 return -ENODEV;
1980 }
1981 kfree(type);
1982 }
1983 info = kzalloc(sizeof(*info), GFP_KERNEL);
1984 if (!info) {
1985 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
1986 return -ENOMEM;
1987 }
1988
1989 info->xbdev = dev;
1990
1991 mutex_init(&info->mutex);
1992 info->vdevice = vdevice;
1993 info->connected = BLKIF_STATE_DISCONNECTED;
1994
1995 info->feature_persistent = feature_persistent;
1996
1997
1998 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
1999 dev_set_drvdata(&dev->dev, info);
2000
2001 mutex_lock(&blkfront_mutex);
2002 list_add(&info->info_list, &info_list);
2003 mutex_unlock(&blkfront_mutex);
2004
2005 return 0;
2006}
2007
2008static int blkif_recover(struct blkfront_info *info)
2009{
2010 unsigned int r_index;
2011 struct request *req, *n;
2012 int rc;
2013 struct bio *bio;
2014 unsigned int segs;
2015 struct blkfront_ring_info *rinfo;
2016
2017 blkfront_gather_backend_features(info);
2018
2019 blkif_set_queue_limits(info);
2020 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
2021 blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
2022
2023 for_each_rinfo(info, rinfo, r_index) {
2024 rc = blkfront_setup_indirect(rinfo);
2025 if (rc)
2026 return rc;
2027 }
2028 xenbus_switch_state(info->xbdev, XenbusStateConnected);
2029
2030
2031 info->connected = BLKIF_STATE_CONNECTED;
2032
2033 for_each_rinfo(info, rinfo, r_index) {
2034
2035 kick_pending_request_queues(rinfo);
2036 }
2037
2038 list_for_each_entry_safe(req, n, &info->requests, queuelist) {
2039
2040 list_del_init(&req->queuelist);
2041 BUG_ON(req->nr_phys_segments > segs);
2042 blk_mq_requeue_request(req, false);
2043 }
2044 blk_mq_start_stopped_hw_queues(info->rq, true);
2045 blk_mq_kick_requeue_list(info->rq);
2046
2047 while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
2048
2049 submit_bio(bio);
2050 }
2051
2052 return 0;
2053}
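
/*
 * Recover after a suspend/resume or backend restart: remember in-flight
 * requests and bios so they can be resubmitted, tear down the old rings
 * and renegotiate with the backend.
 */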
2061static int blkfront_resume(struct xenbus_device *dev)
2062{
2063 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2064 int err = 0;
2065 unsigned int i, j;
2066 struct blkfront_ring_info *rinfo;
2067
2068 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2069
2070 bio_list_init(&info->bio_list);
2071 INIT_LIST_HEAD(&info->requests);
2072 for_each_rinfo(info, rinfo, i) {
2073 struct bio_list merge_bio;
2074 struct blk_shadow *shadow = rinfo->shadow;
2075
2076 for (j = 0; j < BLK_RING_SIZE(info); j++) {
2077
2078 if (!shadow[j].request)
2079 continue;
2080
2081
2082
2083
2084 if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
2085 req_op(shadow[j].request) == REQ_OP_DISCARD ||
2086 req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
2087 shadow[j].request->cmd_flags & REQ_FUA) {
2088
2089
2090
2091
2092
2093
2094
2095 list_add(&shadow[j].request->queuelist, &info->requests);
2096 continue;
2097 }
2098 merge_bio.head = shadow[j].request->bio;
2099 merge_bio.tail = shadow[j].request->biotail;
2100 bio_list_merge(&info->bio_list, &merge_bio);
2101 shadow[j].request->bio = NULL;
2102 blk_mq_end_request(shadow[j].request, BLK_STS_OK);
2103 }
2104 }
2105
2106 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2107
2108 err = talk_to_blkback(dev, info);
2109 if (!err)
2110 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
2111
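	/*
	 * Pending requests and bios saved above are resubmitted by
	 * blkif_recover() once the backend has reconnected.
	 */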
2118 return err;
2119}
2120
2121static void blkfront_closing(struct blkfront_info *info)
2122{
2123 struct xenbus_device *xbdev = info->xbdev;
2124 struct blkfront_ring_info *rinfo;
2125 unsigned int i;
2126
2127 if (xbdev->state == XenbusStateClosing)
2128 return;
2129
2130
2131 blk_mq_stop_hw_queues(info->rq);
2132 blk_set_queue_dying(info->rq);
2133 set_capacity(info->gd, 0);
2134
2135 for_each_rinfo(info, rinfo, i) {
2136
2137 gnttab_cancel_free_callback(&rinfo->callback);
2138
2139
2140 flush_work(&rinfo->work);
2141 }
2142
2143 xenbus_frontend_closed(xbdev);
2144}
2145
2146static void blkfront_setup_discard(struct blkfront_info *info)
2147{
2148 info->feature_discard = 1;
2149 info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
2150 "discard-granularity",
2151 0);
2152 info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
2153 "discard-alignment", 0);
2154 info->feature_secdiscard =
2155 !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
2156 0);
2157}
2158
2159static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2160{
2161 unsigned int psegs, grants, memflags;
2162 int err, i;
2163 struct blkfront_info *info = rinfo->dev_info;
2164
2165 memflags = memalloc_noio_save();
2166
2167 if (info->max_indirect_segments == 0) {
2168 if (!HAS_EXTRA_REQ)
2169 grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2170 else {
2171
2172
2173
2174
2175
2176 grants = GRANTS_PER_PSEG;
2177 }
2178 }
2179 else
2180 grants = info->max_indirect_segments;
2181 psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
2182
2183 err = fill_grant_buffer(rinfo,
2184 (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
2185 if (err)
2186 goto out_of_memory;
2187
2188 if (!info->feature_persistent && info->max_indirect_segments) {
2189
2190
2191
2192
2193
		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);

		BUG_ON(!list_empty(&rinfo->indirect_pages));
		for (i = 0; i < num; i++) {
			struct page *indirect_page = alloc_page(GFP_KERNEL);

			if (!indirect_page)
				goto out_of_memory;
			list_add(&indirect_page->lru, &rinfo->indirect_pages);
		}
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		rinfo->shadow[i].grants_used =
			kvcalloc(grants,
				 sizeof(rinfo->shadow[i].grants_used[0]),
				 GFP_KERNEL);
		rinfo->shadow[i].sg = kvcalloc(psegs,
					       sizeof(rinfo->shadow[i].sg[0]),
					       GFP_KERNEL);
		if (info->max_indirect_segments)
			rinfo->shadow[i].indirect_grants =
				kvcalloc(INDIRECT_GREFS(grants),
					 sizeof(rinfo->shadow[i].indirect_grants[0]),
					 GFP_KERNEL);
		if ((rinfo->shadow[i].grants_used == NULL) ||
		    (rinfo->shadow[i].sg == NULL) ||
		    (info->max_indirect_segments &&
		     (rinfo->shadow[i].indirect_grants == NULL)))
			goto out_of_memory;
		sg_init_table(rinfo->shadow[i].sg, psegs);
	}

	memalloc_noio_restore(memflags);

	return 0;

out_of_memory:
	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		kvfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kvfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
		kvfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
	}
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;

		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	memalloc_noio_restore(memflags);

	return -ENOMEM;
}
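
/*
 * Gather all backend feature-* nodes advertised in xenstore.
 */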
static void blkfront_gather_backend_features(struct blkfront_info *info)
{
	unsigned int indirect_segments;

	info->feature_flush = 0;
	info->feature_fua = 0;

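	/*
	 * If there is no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */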
	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
		info->feature_flush = 1;
		info->feature_fua = 1;
	}

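	/*
	 * And if there is "feature-flush-cache", use that above
	 * barriers.
	 */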
	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
				 0)) {
		info->feature_flush = 1;
		info->feature_fua = 0;
	}

	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
		blkfront_setup_discard(info);

	if (info->feature_persistent)
		info->feature_persistent =
			!!xenbus_read_unsigned(info->xbdev->otherend,
					       "feature-persistent", 0);

	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
					"feature-max-indirect-segments", 0);
	if (indirect_segments > xen_blkif_max_segments)
		indirect_segments = xen_blkif_max_segments;
	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
		indirect_segments = 0;
	info->max_indirect_segments = indirect_segments;

	if (info->feature_persistent) {
		mutex_lock(&blkfront_mutex);
		schedule_delayed_work(&blkfront_work, HZ * 10);
		mutex_unlock(&blkfront_mutex);
	}
}
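
/*
 * Invoked when the backend is finally 'ready' (and has produced the
 * details about the physical device - #sectors, size, etc).
 */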
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int physical_sector_size;
	unsigned int binfo;
	int err, i;
	struct blkfront_ring_info *rinfo;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
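		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */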
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity_and_notify(info->gd, sectors);

		return;
	case BLKIF_STATE_SUSPENDED:
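		/*
		 * We are being reconnected after a suspend/resume or
		 * migration; re-issue the requests and bios that were
		 * saved in blkfront_resume().
		 */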
		blkif_recover(info);
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

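	/*
	 * physical-sector-size is a newer field, so older backends may not
	 * announce it; in that case fall back to the logical sector size.
	 */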
	physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
						    "physical-sector-size",
						    sector_size);
	blkfront_gather_backend_features(info);
	for_each_rinfo(info, rinfo, i) {
		err = blkfront_setup_indirect(rinfo);
		if (err) {
			xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
					 info->xbdev->otherend);
			blkif_free(info, 0);
			break;
		}
	}

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
				  physical_sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		goto fail;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	info->connected = BLKIF_STATE_CONNECTED;
	for_each_rinfo(info, rinfo, i)
		kick_pending_request_queues(rinfo);

	err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
	if (err) {
		blk_cleanup_disk(info->gd);
		blk_mq_free_tag_set(&info->tag_set);
		info->rq = NULL;
		goto fail;
	}

	info->is_ready = 1;
	return;

fail:
	blkif_free(info, 0);
	return;
}
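
/*
 * Callback received when the backend's state changes.
 */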
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (talk_to_blkback(dev, info))
			break;
		break;
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateConnected:
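		/*
		 * talk_to_blkback sets state to XenbusStateInitialised
		 * and blkfront_connect sets it to XenbusStateConnected
		 * (if connection went OK).
		 *
		 * If the backend (or toolstack) decides to poke at backend
		 * state (and re-switches it to XenbusStateConnected) we do
		 * need to set the state to XenbusStateInitialised ourselves
		 * again.
		 */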
		if ((dev->state != XenbusStateInitialised) &&
		    (dev->state != XenbusStateConnected)) {
			if (talk_to_blkback(dev, info))
				break;
		}

		blkfront_connect(info);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;
	case XenbusStateClosing:
		blkfront_closing(info);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	del_gendisk(info->gd);

	mutex_lock(&blkfront_mutex);
	list_del(&info->info_list);
	mutex_unlock(&blkfront_mutex);

	blkif_free(info, 0);
	xlbd_release_minors(info->gd->first_minor, info->gd->minors);
	blk_cleanup_disk(info->gd);
	blk_mq_free_tag_set(&info->tag_set);

	kfree(info);
	return 0;
}

static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}

static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
	.compat_ioctl = blkdev_compat_ptr_ioctl,
};

static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront_driver = {
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};

static void purge_persistent_grants(struct blkfront_info *info)
{
	unsigned int i;
	unsigned long flags;
	struct blkfront_ring_info *rinfo;

	for_each_rinfo(info, rinfo, i) {
		struct grant *gnt_list_entry, *tmp;

		spin_lock_irqsave(&rinfo->ring_lock, flags);

		if (rinfo->persistent_gnts_c == 0) {
			spin_unlock_irqrestore(&rinfo->ring_lock, flags);
			continue;
		}

		list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
					 node) {
			if (gnt_list_entry->gref == GRANT_INVALID_REF ||
			    gnttab_query_foreign_access(gnt_list_entry->gref))
				continue;

			list_del(&gnt_list_entry->node);
			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
			rinfo->persistent_gnts_c--;
			gnt_list_entry->gref = GRANT_INVALID_REF;
			list_add_tail(&gnt_list_entry->node, &rinfo->grants);
		}

		spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	}
}

static void blkfront_delay_work(struct work_struct *work)
{
	struct blkfront_info *info;
	bool need_schedule_work = false;

	mutex_lock(&blkfront_mutex);

	list_for_each_entry(info, &info_list, info_list) {
		if (info->feature_persistent) {
			need_schedule_work = true;
			mutex_lock(&info->mutex);
			purge_persistent_grants(info);
			mutex_unlock(&info->mutex);
		}
	}

	if (need_schedule_work)
		schedule_delayed_work(&blkfront_work, HZ * 10);

	mutex_unlock(&blkfront_mutex);
}

static int __init xlblk_init(void)
{
	int ret;
	int nr_cpus = num_online_cpus();

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_disk_devices())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		pr_warn("xen_blk: can't get major %d with name %s\n",
			XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	if (xen_blkif_max_queues > nr_cpus) {
		pr_info("Invalid max_queues (%d), will use default max: %d.\n",
			xen_blkif_max_queues, nr_cpus);
		xen_blkif_max_queues = nr_cpus;
	}

	INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);

static void __exit xlblk_exit(void)
{
	cancel_delayed_work_sync(&blkfront_work);

	xenbus_unregister_driver(&blkfront_driver);
	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
	kfree(minors);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");