// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling.
 *
 * Core block layer support for zoned block devices: zone report
 * processing, zone management operations, zone write locking and
 * zone revalidation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralize block layer function to convert BLK_ZONE_COND_XXX
 * into string format. Useful in the debugging and tracing zone conditions. For
 * invalid BLK_ZONE_COND_XXX it returns string "UNKNOWN".
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
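
/*
 * Illustrative example (not part of this file's API documentation): a driver
 * inspecting a reported zone could log its condition as follows; "zone" is
 * assumed to be a struct blk_zone filled in by a zone report.
 *
 *	pr_debug("zone at sector %llu: %s\n", zone->start,
 *		 blk_zone_cond_str(zone->cond));
 */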

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
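
/*
 * Illustrative sketch of how a blk-mq driver might use the zone write lock
 * helpers around dispatch of a sequential zone write. This is an assumption
 * about typical driver usage, not code from this file; "my_queue_rq" and
 * "my_complete" are hypothetical driver functions, and the inline
 * blk_req_zone_write_lock()/blk_req_zone_write_unlock() wrappers around the
 * __blk_req_zone_write_*() helpers above are declared in the block layer
 * headers.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (blk_req_needs_zone_write_lock(rq))
 *			blk_req_zone_write_lock(rq);
 *		...dispatch rq to the hardware...
 *	}
 *
 *	static void my_complete(struct request *rq)
 *	{
 *		blk_req_zone_write_unlock(rq);
 *		...
 *	}
 */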

/**
 * blkdev_nr_zones - Get number of zones
 * @disk:	Target gendisk
 *
 * Return the total number of zones of a zoned block device.  For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);

	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @nr_zones:	Maximum number of zones to report
 * @cb:		Callback function called for each reported zone
 * @data:	Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
	    WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
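
/*
 * Illustrative example of the report_zones_cb interface: count the zones of
 * a device that are empty. This is a hedged usage sketch, not kernel code;
 * "count_empty_cb" and "nr_empty" are hypothetical names.
 *
 *	static int count_empty_cb(struct blk_zone *zone, unsigned int idx,
 *				  void *data)
 *	{
 *		unsigned int *nr_empty = data;
 *
 *		if (zone->cond == BLK_ZONE_COND_EMPTY)
 *			(*nr_empty)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_empty = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_empty_cb, &nr_empty);
 */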

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}

static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
					  gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	need_reset = blk_alloc_zone_bitmap(q->node, q->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	ret = bdev->bd_disk->fops->report_zones(bdev->bd_disk, 0,
				q->nr_zones, blk_zone_need_reset_cb,
				need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		if (!test_bit(blk_queue_zone_no(q, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}

static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	return submit_bio_wait(&bio);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev, gfp_mask);
		return blkdev_zone_reset_all(bdev, gfp_mask);
	}

	while (sector < end_sector) {
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
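
/*
 * Illustrative example (an assumption about typical callers, not code from
 * this file): resetting the single zone that contains "sector". Zone sizes
 * are a power-of-two number of sectors, so round_down() yields the zone
 * start sector.
 *
 *	sector_t zone_sectors = bdev_zone_sectors(bdev);
 *	int ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
 *				   round_down(sector, zone_sectors),
 *				   zone_sectors, GFP_KERNEL);
 */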

struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}

static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		filemap_invalidate_lock(bdev->bd_inode->i_mapping);
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			goto fail;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

fail:
	if (cmd == BLKRESETZONE)
		filemap_invalidate_unlock(bdev->bd_inode->i_mapping);

	return ret;
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->conv_zones_bitmap);
	q->conv_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
	sector_t	zone_sectors;
	sector_t	sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception of an
	 * eventual smaller last zone. The zone size must also be a power
	 * of two number of sectors.
	 */
	if (zone->start == 0) {
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	args->sector += zone->len;
	return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re) allocate and initialize
 * a disk request queue zone bitmaps. This function should normally be called
 * within the disk ->revalidate method for blk-mq based drivers.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		q->nr_zones = args.nr_zones;
		swap(q->seq_zones_wlock, args.seq_zones_wlock);
		swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_queue_free_zone_bitmaps(q);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
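
/*
 * Illustrative sketch (assumed typical usage, not code from this file): a
 * zoned blk-mq driver would call blk_revalidate_disk_zones() once the disk
 * capacity is set, failing the device setup if zone revalidation fails.
 *
 *	int ret = blk_revalidate_disk_zones(disk, NULL);
 *
 *	if (ret)
 *		return ret;	// zones are invalid or the report failed
 */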

void blk_queue_clear_zone_settings(struct request_queue *q)
{
	blk_mq_freeze_queue(q);

	blk_queue_free_zone_bitmaps(q);
	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
	q->nr_zones = 0;
	q->max_open_zones = 0;
	q->max_active_zones = 0;
	q->limits.chunk_sectors = 0;
	q->limits.zone_write_granularity = 0;
	q->limits.max_zone_append_sectors = 0;

	blk_mq_unfreeze_queue(q);
}