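/*
 * Functions related to mapping data to requests
 */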
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

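/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */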
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	blk_queue_bounce(rq->q, &bio);

	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

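/*
 * Undo the mapping for one bio: user-mapped pages are unpinned, while a
 * bounce-buffered bio has its data copied back to user space as needed.
 */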
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

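/*
 * Map (or copy) as much of the iterator as fits in a single bio and
 * append it to @rq.  The iterator is advanced past the mapped data, so
 * the caller can loop until it is drained.
 */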
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;

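	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */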
	ret = blk_rq_append_bio(rq, bio);
	bio_get(bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

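/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */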
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

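/*
 * Convenience wrapper around blk_rq_map_user_iov() for a single
 * contiguous user buffer.
 *
 * Example usage (a sketch modeled on an SG_IO-style ioctl path; request
 * setup and error handling are assumed and not shown):
 *
 *	if (blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL))
 *		goto out;
 *	bio = rq->bio;
 *	blk_execute_rq(q, bd_disk, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 *
 * rq->bio is saved before blk_execute_rq(), since I/O completion may
 * change rq->bio and blk_rq_unmap_user() needs the originally mapped bio.
 */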
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

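/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */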
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

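/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */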
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
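		/* request is too big */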
		bio_put(bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
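
/*
 * Example usage (a sketch of a kernel-buffer passthrough command; the
 * request_queue, buffer, and command setup are assumed, not shown):
 *
 *	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	if (buffer && bufflen) {
 *		ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_KERNEL);
 *		if (ret)
 *			goto out_put;
 *	}
 *	blk_execute_rq(q, NULL, rq, 0);
 * out_put:
 *	blk_put_request(rq);
 */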