/*
 * Functions related to generic block device helpers (discard, write same,
 * zero-out).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/* an -EOPNOTSUPP completion does not mark the whole batch as failed */
	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	/* the last completed bio wakes up the issuer */
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}
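
/*
 * The helpers below all follow the same pattern: prime bb.done to 1, bump
 * it once for every bio submitted, then drop that initial reference and,
 * if any bios are still in flight, sleep on the completion until the last
 * bio_batch_end_io() call signals it.
 */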

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	/*
	 * Ensure that max_discard_sectors is a multiple of the granularity,
	 * so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid an infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would
		 * be misaligned, stop the discard at the previous aligned sector.
		 */
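		/*
		 * For example (illustrative numbers only): with a granularity
		 * of 1024 sectors, alignment 0, sector 100 and req_sects 4096,
		 * end_sect is trimmed from 4196 down to 4096, req_sects becomes
		 * 3996, and the next bio starts on a granule boundary.
		 */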
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here if someone does a
		 * full-device discard (like mkfs). Be nice and allow
		 * ourselves to be scheduled out to avoid softlocking if
		 * preemption is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	/* Wait for bios in flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
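
/*
 * Illustrative (hypothetical) caller, not part of this file: a filesystem
 * discarding a freed extent might do
 *
 *	err = blkdev_issue_discard(bdev, start, len, GFP_NOFS, 0);
 *	if (err && err != -EOPNOTSUPP)
 *		return err;
 *
 * passing BLKDEV_DISCARD_SECURE as the last argument to request a secure
 * discard instead.
 */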

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

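		/*
		 * Build a bio whose single bio_vec points at @page; the
		 * device replicates that one logical block of payload across
		 * the whole bi_size range of the request.
		 */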
		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -ENOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
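
/*
 * Illustrative (hypothetical) caller, not part of this file: to stamp a
 * pattern held in one logical block of @page across a range, a driver
 * might do
 *
 *	err = blkdev_issue_write_same(bdev, start, len, GFP_NOFS, page);
 *
 * where @page must remain valid until the call returns, since the helper
 * waits for all submitted bios to complete.
 */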

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */

static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

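		/*
		 * Pack as many ZERO_PAGE-sized chunks into this bio as it
		 * will take.  bio_add_page() returns the number of bytes
		 * actually added; once it adds less than requested the bio
		 * is full, so it is submitted and a new one is allocated.
		 */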
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;	/* ret held the bio_add_page() byte count */
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch was completed with an error */
		ret = -EIO;

	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Zero-fill the sectors in question, preferring WRITE SAME with a
 *    fallback to regular zero-filled writes.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		unsigned char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
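
/*
 * Illustrative (hypothetical) caller, not part of this file:
 *
 *	err = blkdev_issue_zeroout(bdev, start, len, GFP_NOFS);
 *
 * zeroes @len sectors starting at @start, using WRITE SAME when the device
 * supports it and falling back to regular zero-filled writes otherwise.
 */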