/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:   the request queue for the device
 * @tag: the tag of the request
 *
 * Notes:
 *    The request must currently be tagged for the lookup to match,
 *    and the queue lock must be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
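
/*
 * Example (sketch, hypothetical completion handler): a driver that gets
 * back a hardware tag can map it to the owning request; "hw_tag" below
 * is a hypothetical value read from the device:
 *
 *	struct request *rq = blk_queue_find_tag(q, hw_tag);
 *
 *	if (rq)
 *		blk_queue_end_tag(q, rq);
 */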

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Drops the reference count on @bqt; when the last reference is dropped,
 * all tags must have been completed and the map is freed.
 **/
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (atomic_dec_and_test(&bqt->refcnt)) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if
 *    tagging has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *    This is used to disable tagged queuing to a device, yet leave
 *    the queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

/*
 * Allocate the tag_index array and the tag bitmap for @depth tags.  The
 * depth is capped at twice the queue's request count, since deeper
 * tagging could not be put to use anyway.
 */
static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth, int alloc_policy)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	tags->alloc_policy = alloc_policy;
	tags->next_tag = 0;
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 * @alloc_policy: tag allocation policy
 **/
struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
	return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);
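
/*
 * A map created with blk_init_tags() starts with a reference count of
 * one; each queue that later adopts it via blk_queue_init_tags() takes
 * another reference.  A sketch of the creator's side (the depth of 64
 * is just an example value):
 *
 *	struct blk_queue_tag *bqt;
 *
 *	bqt = blk_init_tags(64, BLK_TAG_ALLOC_FIFO);
 *	if (!bqt)
 *		return -ENOMEM;
 *	...
 *	blk_free_tags(bqt);
 *
 * where blk_free_tags() drops the creator's reference once the queues
 * are done with the map.
 */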

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag map to use, or NULL to allocate a private one
 * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags, int alloc_policy)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth, alloc_policy);

		if (!tags)
			return -ENOMEM;

	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
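
/*
 * Example (sketch): queues that should share one tag space, e.g. several
 * devices behind the same HBA, can each be handed the same map; the
 * "shared_bqt" pointer below is hypothetical:
 *
 *	if (blk_queue_init_tags(q, depth, shared_bqt, BLK_TAG_ALLOC_FIFO))
 *		return -ENOMEM;
 *
 * Passing @tags as NULL instead makes the queue allocate a private map.
 */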

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just adjust
	 * max_depth.  *NOTE* as requests with tag value between
	 * new_depth and real_max_depth can be in-flight, the tag map
	 * cannot be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case.
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * Save the old state info, so we can copy it back.
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
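
/*
 * Example (sketch): a driver that learns after probing that the device
 * supports deeper queueing can grow the map under the queue lock (the
 * depth of 64 is just an example value, and the calling context is
 * assumed to allow sleeping locks to be taken with irqs enabled):
 *
 *	spin_lock_irq(q->queue_lock);
 *	err = blk_queue_resize_tags(q, 64);
 *	spin_unlock_irq(q->queue_lock);
 */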

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Releases the tag held by @rq and clears it from the tag map.  Must
 *    be called before the request itself is completed and freed, and
 *    with the queue lock held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}

	/*
	 * The tag_map bit acts as a lock for tag_index[tag], so we need
	 * unlock memory barrier semantics here.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
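
/*
 * Example (sketch, hypothetical completion path): end the tag first,
 * then complete the request, so the tag map never points at a freed
 * request:
 *
 *	blk_queue_end_tag(q, rq);
 *	__blk_end_request_all(rq, error);
 */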

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 * Description:
 *    Finds a free tag, assigns it to @rq, marks the request started and
 *    adds it to the tag busy list.  Returns 0 on success, or 1 if no tag
 *    is currently available, in which case the request is left untouched
 *    on the queue.  Must be called with the queue lock held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		switch (max_depth) {
		case 2:
			max_depth = 1;
			break;
		case 3:
			max_depth = 2;
			break;
		default:
			max_depth -= 2;
		}
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
			tag = find_first_zero_bit(bqt->tag_map, max_depth);
			if (tag >= max_depth)
				return 1;
		} else {
			int start = bqt->next_tag;
			int size = min_t(int, bqt->max_depth, max_depth + start);
			tag = find_next_zero_bit(bqt->tag_map, size, start);
			if (tag >= size && start + size > bqt->max_depth) {
				size = start + size - bqt->max_depth;
				tag = find_first_zero_bit(bqt->tag_map, size);
			}
			if (tag >= size)
				return 1;
		}

	} while (test_and_set_bit_lock(tag, bqt->tag_map));

	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */
	bqt->next_tag = (tag + 1) % bqt->max_depth;
	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
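
/*
 * Example (sketch, hypothetical request_fn): peek at the head of the
 * queue and tag each request before issuing it; note that on success
 * blk_queue_start_tag() dequeues the request itself via
 * blk_start_request():
 *
 *	struct request *rq;
 *
 *	while ((rq = blk_peek_request(q)) != NULL) {
 *		if (blk_queue_start_tag(q, rq))
 *			break;	(no free tag, try again later)
 *		... issue rq, identified by rq->tag, to the hardware ...
 *	}
 */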

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 * Description:
 *    Hardware conditions may dictate a need to stop all pending requests.
 *    In this case, we will safely clear the block side of the tag queue
 *    and readd all requests to the request queue in the right order.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);