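// SPDX-License-Identifier: GPL-2.0-only
/*
 * SCSI Zoned Block commands
 */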
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include "sd.h"

static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
{
	if (zone->type == ZBC_ZONE_TYPE_CONV)
		return 0;

	switch (zone->cond) {
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		return zone->wp - zone->start;
	case BLK_ZONE_COND_FULL:
		return zone->len;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
	default:
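		/*
		 * Offline and read-only zones do not have a valid
		 * write pointer. Use 0 as for an empty zone.
		 */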
		return 0;
	}
}

static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
			       unsigned int idx, report_zones_cb cb, void *data)
{
	struct scsi_device *sdp = sdkp->device;
	struct blk_zone zone = { 0 };
	int ret;

	zone.type = buf[0] & 0x0f;
	zone.cond = (buf[1] >> 4) & 0xf;
	if (buf[1] & 0x01)
		zone.reset = 1;
	if (buf[1] & 0x02)
		zone.non_seq = 1;

	zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
	zone.capacity = zone.len;
	zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
	zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
	if (zone.type != ZBC_ZONE_TYPE_CONV &&
	    zone.cond == ZBC_ZONE_COND_FULL)
		zone.wp = zone.start + zone.len;

	ret = cb(&zone, idx, data);
	if (ret)
		return ret;

	if (sdkp->rev_wp_offset)
		sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone);

	return 0;
}

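/**
 * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
 * @sdkp: The target disk
 * @buf: vmalloc-ed buffer to use for the reply
 * @buflen: the buffer size
 * @lba: Start LBA of the report
 * @partial: Do partial report
 *
 * For internal use during device validation.
 * Using partial=true can significantly speed up execution of a report zones
 * command because the disk does not have to count all possible report
 * matching zones and will only report the count of zones fitting in the
 * command reply buffer.
 */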
static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
				  unsigned int buflen, sector_t lba,
				  bool partial)
{
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout;
	struct scsi_sense_hdr sshdr;
	unsigned char cmd[16];
	unsigned int rep_len;
	int result;

	memset(cmd, 0, 16);
	cmd[0] = ZBC_IN;
	cmd[1] = ZI_REPORT_ZONES;
	put_unaligned_be64(lba, &cmd[2]);
	put_unaligned_be32(buflen, &cmd[10]);
	if (partial)
		cmd[14] = ZBC_REPORT_ZONE_PARTIAL;

	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
				  buf, buflen, &sshdr,
				  timeout, SD_MAX_RETRIES, NULL);
	if (result) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES start lba %llu failed\n", lba);
		sd_print_result(sdkp, "REPORT ZONES", result);
		if (result > 0 && scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EIO;
	}

	rep_len = get_unaligned_be32(&buf[0]);
	if (rep_len < 64) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES report invalid length %u\n",
			  rep_len);
		return -EIO;
	}

	return 0;
}

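/**
 * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply.
 * @sdkp: The target disk
 * @nr_zones: Maximum number of zones to report
 * @buflen: Size of the buffer allocated
 *
 * Try to allocate a reply buffer for the number of requested zones.
 * The size of the buffer allocated may be smaller than requested to
 * satisfy the device constraints (max_hw_sectors, max_segments, etc).
 *
 * Return the address of the allocated buffer and update @buflen with
 * the size of the buffer allocated.
 */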
static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
					unsigned int nr_zones, size_t *buflen)
{
	struct request_queue *q = sdkp->disk->queue;
	size_t bufsize;
	void *buf;

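	/*
	 * Report zone buffer size should be at most 64B times the number of
	 * zones requested plus the 64B reply header, but should be aligned
	 * to SECTOR_SIZE for ATA devices.
	 * Make sure that this size does not exceed the hardware capabilities.
	 * Furthermore, since the report zone command cannot be split, make
	 * sure that the allocated buffer can always be mapped by limiting the
	 * number of pages allocated to the HBA max segments limit.
	 */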
	nr_zones = min(nr_zones, sdkp->nr_zones);
	bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
	bufsize = min_t(size_t, bufsize,
			queue_max_hw_sectors(q) << SECTOR_SHIFT);
	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);

	while (bufsize >= SECTOR_SIZE) {
		buf = __vmalloc(bufsize,
				GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
		if (buf) {
			*buflen = bufsize;
			return buf;
		}
		bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
	}

	return NULL;
}

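/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 */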
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
	return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}

int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
	unsigned int nr, i;
	unsigned char *buf;
	size_t offset, buflen = 0;
	int zone_idx = 0;
	int ret;

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return -EOPNOTSUPP;

	if (!capacity)
		/* Device gone or invalid */
		return -ENODEV;

	buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
	if (!buf)
		return -ENOMEM;

	while (zone_idx < nr_zones && sector < capacity) {
		ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
				sectors_to_logical(sdkp->device, sector), true);
		if (ret)
			goto out;

		offset = 0;
		nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
		if (!nr)
			break;

		for (i = 0; i < nr && zone_idx < nr_zones; i++) {
			offset += 64;
			ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
						  cb, data);
			if (ret)
				goto out;
			zone_idx++;
		}

		sector += sd_zbc_zone_sectors(sdkp) * i;
	}

	ret = zone_idx;
out:
	kvfree(buf);
	return ret;
}

static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t sector = blk_rq_pos(rq);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLK_STS_IOERR;

	if (sdkp->device->changed)
		return BLK_STS_IOERR;

	if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
		/* Unaligned request */
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}

#define SD_ZBC_INVALID_WP_OFST	(~0u)
#define SD_ZBC_UPDATING_WP_OFST	(SD_ZBC_INVALID_WP_OFST - 1)

static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
				      void *data)
{
	struct scsi_disk *sdkp = data;

	lockdep_assert_held(&sdkp->zones_wp_offset_lock);

	sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone);

	return 0;
}

static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{
	struct scsi_disk *sdkp;
	unsigned long flags;
	sector_t zno;
	int ret;

	sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);

	spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
	for (zno = 0; zno < sdkp->nr_zones; zno++) {
		if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
			continue;

		spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
		ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
					     SD_BUF_SIZE,
					     zno * sdkp->zone_blocks, true);
		spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
		if (!ret)
			sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
					    zno, sd_zbc_update_wp_offset_cb,
					    sdkp);
	}
	spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);

	scsi_device_put(sdkp->device);
}

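/**
 * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command.
 * @cmd: the command to setup
 * @lba: the LBA to patch
 * @nr_blocks: the number of LBAs to be written
 *
 * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND.
 * @sd_zbc_prepare_zone_append() handles the necessary zone write locking and
 * patching of the lba for an emulated ZONE_APPEND command.
 *
 * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will
 * schedule a REPORT ZONES command and return BLK_STS_DEV_RESOURCE.
 */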
blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
					unsigned int nr_blocks)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	unsigned int wp_offset, zno = blk_rq_zone_no(rq);
	unsigned long flags;
	blk_status_t ret;

	ret = sd_zbc_cmnd_checks(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	if (!blk_rq_zone_is_seq(rq))
		return BLK_STS_IOERR;

	/* Unlock of the write lock will happen in sd_zbc_complete() */
	if (!blk_req_zone_write_trylock(rq))
		return BLK_STS_ZONE_RESOURCE;

	spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
	wp_offset = sdkp->zones_wp_offset[zno];
	switch (wp_offset) {
	case SD_ZBC_INVALID_WP_OFST:
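		/*
		 * We are about to schedule work to update a zone write pointer
		 * offset, which will cause the zone append command to be
		 * requeued. So make sure that the scsi device does not go away
		 * while the work is being processed.
		 */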
		if (scsi_device_get(sdkp->device)) {
			ret = BLK_STS_IOERR;
			break;
		}
		sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST;
		schedule_work(&sdkp->zone_wp_offset_work);
		fallthrough;
	case SD_ZBC_UPDATING_WP_OFST:
		ret = BLK_STS_DEV_RESOURCE;
		break;
	default:
		wp_offset = sectors_to_logical(sdkp->device, wp_offset);
		if (wp_offset + nr_blocks > sdkp->zone_blocks) {
			ret = BLK_STS_IOERR;
			break;
		}

		*lba += wp_offset;
	}
	spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
	if (ret)
		blk_req_zone_write_unlock(rq);
	return ret;
}

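/**
 * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
 *			can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
 * @cmd: the command to setup
 * @op: Operation to be performed
 * @all: All zones control
 *
 * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
 */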
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
					 unsigned char op, bool all)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	sector_t sector = blk_rq_pos(rq);
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t block = sectors_to_logical(sdkp->device, sector);
	blk_status_t ret;

	ret = sd_zbc_cmnd_checks(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_OUT;
	cmd->cmnd[1] = op;
	if (all)
		cmd->cmnd[14] = 0x1;
	else
		put_unaligned_be64(block, &cmd->cmnd[2]);

	rq->timeout = SD_TIMEOUT;
	cmd->sc_data_direction = DMA_NONE;
	cmd->transfersize = 0;
	cmd->allowed = 0;

	return BLK_STS_OK;
}

static bool sd_zbc_need_zone_wp_update(struct request *rq)
{
	switch (req_op(rq)) {
	case REQ_OP_ZONE_APPEND:
	case REQ_OP_ZONE_FINISH:
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
		return true;
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}

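/**
 * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 *
 * Called from sd_zbc_complete() to handle the update of the cached zone write
 * pointer value in case an update is needed.
 */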
static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
					  unsigned int good_bytes)
{
	int result = cmd->result;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	unsigned int zno = blk_rq_zone_no(rq);
	enum req_opf op = req_op(rq);
	unsigned long flags;

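	/*
	 * If we got an error for a command that needs updating the write
	 * pointer offset cache, we must mark the zone wp offset entry as
	 * invalid to force an update from disk the next time a zone append
	 * command is issued.
	 */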
	spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);

	if (result && op != REQ_OP_ZONE_RESET_ALL) {
		if (op == REQ_OP_ZONE_APPEND) {
			/* Force complete completion (no retry) */
			good_bytes = 0;
			scsi_set_resid(cmd, blk_rq_bytes(rq));
		}

		/*
		 * Force an update of the zone write pointer offset on
		 * the next zone append access.
		 */
		if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
			sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST;
		goto unlock_wp_offset;
	}

	switch (op) {
	case REQ_OP_ZONE_APPEND:
		rq->__sector += sdkp->zones_wp_offset[zno];
		fallthrough;
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
			sdkp->zones_wp_offset[zno] +=
						good_bytes >> SECTOR_SHIFT;
		break;
	case REQ_OP_ZONE_RESET:
		sdkp->zones_wp_offset[zno] = 0;
		break;
	case REQ_OP_ZONE_FINISH:
		sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp);
		break;
	case REQ_OP_ZONE_RESET_ALL:
		memset(sdkp->zones_wp_offset, 0,
		       sdkp->nr_zones * sizeof(unsigned int));
		break;
	default:
		break;
	}

unlock_wp_offset:
	spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);

	return good_bytes;
}

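/**
 * sd_zbc_complete - ZBC command post processing.
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 * @sshdr: command sense header
 *
 * Called from sd_done() to handle zone commands errors and updates to the
 * device queue zone write pointer offset cache.
 */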
unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
			     struct scsi_sense_hdr *sshdr)
{
	int result = cmd->result;
	struct request *rq = scsi_cmd_to_rq(cmd);

	if (op_is_zone_mgmt(req_op(rq)) &&
	    result &&
	    sshdr->sense_key == ILLEGAL_REQUEST &&
	    sshdr->asc == 0x24) {
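		/*
		 * INVALID FIELD IN CDB error: a zone management command was
		 * attempted on a conventional zone. Nothing to worry about,
		 * so be quiet about the error.
		 */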
		rq->rq_flags |= RQF_QUIET;
	} else if (sd_zbc_need_zone_wp_update(rq))
		good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		blk_req_zone_write_unlock(rq);

	return good_bytes;
}

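/**
 * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
 * @sdkp: Target disk
 * @buf: Buffer where to store the VPD page data
 *
 * Read VPD page B6, get information and check that reads are unconstrained.
 */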
static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
					      unsigned char *buf)
{

	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
		sd_printk(KERN_NOTICE, sdkp,
			  "Read zoned characteristics VPD page failed\n");
		return -ENODEV;
	}

	if (sdkp->device->type != TYPE_ZBC) {
		/* Host-aware */
		sdkp->urswrz = 1;
		sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
		sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
		sdkp->zones_max_open = 0;
	} else {
		/* Host-managed */
		sdkp->urswrz = buf[4] & 1;
		sdkp->zones_optimal_open = 0;
		sdkp->zones_optimal_nonseq = 0;
		sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
	}

	/*
	 * Check for unconstrained reads: host-managed devices with
	 * constrained reads (drives failing read after write pointer)
	 * are not supported.
	 */
	if (!sdkp->urswrz) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "constrained reads devices are not supported\n");
		return -ENODEV;
	}

	return 0;
}

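/**
 * sd_zbc_check_capacity - Check the device capacity
 * @sdkp: Target disk
 * @buf: command buffer
 * @zblocks: zone size in number of blocks
 *
 * Get the device zone size and check that the device capacity as reported
 * by READ CAPACITY matches the max_lba value (plus one) of the report zones
 * command reply for devices with RC_BASIS == 0.
 *
 * Returns 0 upon success or an error code upon failure.
 */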
static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
				 u32 *zblocks)
{
	u64 zone_blocks;
	sector_t max_lba;
	unsigned char *rec;
	int ret;

	/* Do a report zone to get max_lba and the size of the first zone */
	ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
	if (ret)
		return ret;

	if (sdkp->rc_basis == 0) {
		/* The max_lba field is the capacity of this device */
		max_lba = get_unaligned_be64(&buf[8]);
		if (sdkp->capacity != max_lba + 1) {
			if (sdkp->first_scan)
				sd_printk(KERN_WARNING, sdkp,
					  "Changing capacity from %llu to max LBA+1 %llu\n",
					  (unsigned long long)sdkp->capacity,
					  (unsigned long long)max_lba + 1);
			sdkp->capacity = max_lba + 1;
		}
	}

	/* Get the size of the first reported zone */
	rec = buf + 64;
	zone_blocks = get_unaligned_be64(&rec[8]);
	if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Zone size too large\n");
		return -EFBIG;
	}

	*zblocks = zone_blocks;

	return 0;
}

static void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
	if (!sd_is_zoned(sdkp) || !sdkp->capacity)
		return;

	if (sdkp->capacity & (sdkp->zone_blocks - 1))
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks + 1 runt zone\n",
			  sdkp->nr_zones - 1,
			  sdkp->zone_blocks);
	else
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks\n",
			  sdkp->nr_zones,
			  sdkp->zone_blocks);
}

static int sd_zbc_init_disk(struct scsi_disk *sdkp)
{
	sdkp->zones_wp_offset = NULL;
	spin_lock_init(&sdkp->zones_wp_offset_lock);
	sdkp->rev_wp_offset = NULL;
	mutex_init(&sdkp->rev_mutex);
	INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
	sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
	if (!sdkp->zone_wp_update_buf)
		return -ENOMEM;

	return 0;
}

static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
{
	/* Serialize against revalidate zones */
	mutex_lock(&sdkp->rev_mutex);

	kvfree(sdkp->zones_wp_offset);
	sdkp->zones_wp_offset = NULL;
	kfree(sdkp->zone_wp_update_buf);
	sdkp->zone_wp_update_buf = NULL;

	sdkp->nr_zones = 0;
	sdkp->rev_nr_zones = 0;
	sdkp->zone_blocks = 0;
	sdkp->rev_zone_blocks = 0;

	mutex_unlock(&sdkp->rev_mutex);
}

void sd_zbc_release_disk(struct scsi_disk *sdkp)
{
	if (sd_is_zoned(sdkp))
		sd_zbc_clear_zone_info(sdkp);
}

static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);

	swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
}

int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
{
	struct gendisk *disk = sdkp->disk;
	struct request_queue *q = disk->queue;
	u32 zone_blocks = sdkp->rev_zone_blocks;
	unsigned int nr_zones = sdkp->rev_nr_zones;
	u32 max_append;
	int ret = 0;
	unsigned int flags;

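	/*
	 * For all zoned disks, initialize zone append emulation data if not
	 * already done.
	 */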
	if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
		ret = sd_zbc_init_disk(sdkp);
		if (ret)
			return ret;
	}

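	/*
	 * There is nothing to do for regular disks, including host-aware disks
	 * that have partitions.
	 */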
	if (!blk_queue_is_zoned(q))
		return 0;

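	/*
	 * Make sure revalidate zones are serialized to ensure exclusive
	 * updates of the scsi disk data.
	 */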
	mutex_lock(&sdkp->rev_mutex);

	if (sdkp->zone_blocks == zone_blocks &&
	    sdkp->nr_zones == nr_zones &&
	    disk->queue->nr_zones == nr_zones)
		goto unlock;

	flags = memalloc_noio_save();
	sdkp->zone_blocks = zone_blocks;
	sdkp->nr_zones = nr_zones;
	sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
	if (!sdkp->rev_wp_offset) {
		ret = -ENOMEM;
		memalloc_noio_restore(flags);
		goto unlock;
	}

	ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);

	memalloc_noio_restore(flags);
	kvfree(sdkp->rev_wp_offset);
	sdkp->rev_wp_offset = NULL;

	if (ret) {
		sdkp->zone_blocks = 0;
		sdkp->nr_zones = 0;
		sdkp->capacity = 0;
		goto unlock;
	}

	max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
			   q->limits.max_segments << (PAGE_SHIFT - 9));
	max_append = min_t(u32, max_append, queue_max_hw_sectors(q));

	blk_queue_max_zone_append_sectors(q, max_append);

	sd_zbc_print_zones(sdkp);

unlock:
	mutex_unlock(&sdkp->rev_mutex);

	return ret;
}

int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
	struct gendisk *disk = sdkp->disk;
	struct request_queue *q = disk->queue;
	unsigned int nr_zones;
	u32 zone_blocks = 0;
	int ret;

	if (!sd_is_zoned(sdkp))
		/*
		 * Device managed or normal SCSI disk,
		 * no special handling required
		 */
		return 0;

	/* READ16/WRITE16 is mandatory for ZBC disks */
	sdkp->device->use_16_for_rw = 1;
	sdkp->device->use_10_for_rw = 0;

	if (!blk_queue_is_zoned(q)) {
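		/*
		 * The queue zone model was cleared (e.g. a host-aware disk
		 * used as a regular disk). Only free the scsi disk zone
		 * information and exit early.
		 */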
		sd_zbc_clear_zone_info(sdkp);
		return 0;
	}

	/* Check zoned block device characteristics (unconstrained reads) */
	ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
	if (ret)
		goto err;

	/* Check the device capacity reported by report zones */
	ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks);
	if (ret != 0)
		goto err;

	/* The drive satisfies the kernel restrictions: set it up */
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
	if (sdkp->zones_max_open == U32_MAX)
		blk_queue_max_open_zones(q, 0);
	else
		blk_queue_max_open_zones(q, sdkp->zones_max_open);
	blk_queue_max_active_zones(q, 0);
	nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);

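	/*
	 * Per ZBC and ZAC specifications, writes in sequential write required
	 * zones of host-managed devices must be aligned to the device physical
	 * block size.
	 */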
	if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);

	sdkp->rev_nr_zones = nr_zones;
	sdkp->rev_zone_blocks = zone_blocks;

	return 0;

err:
	sdkp->capacity = 0;

	return ret;
}