/*
 * Device-mapper "delay" target: delays reads and/or writes and can
 * send them to different underlying devices.
 *
 * This file is released under the GPL.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "delay"

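/*
 * One delay class per bio type (read/write/flush): backing device,
 * start offset on that device, delay in milliseconds and the number
 * of bios currently held back for this class.
 */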
struct delay_class {
	struct dm_dev *dev;
	sector_t start;
	unsigned delay;
	unsigned ops;
};

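/*
 * Per-target context: the timer and workqueue used to release expired
 * bios, the list of delayed bios, and the three delay classes.
 */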
struct delay_c {
	struct timer_list delay_timer;
	struct mutex timer_lock;
	struct workqueue_struct *kdelayd_wq;
	struct work_struct flush_expired_bios;
	struct list_head delayed_bios;
	atomic_t may_delay;

	struct delay_class read;
	struct delay_class write;
	struct delay_class flush;

	int argc;
};

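/* Per-bio bookkeeping, stored in the bio's per-I/O data area. */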
struct dm_delay_info {
	struct delay_c *context;
	struct delay_class *class;
	struct list_head list;
	unsigned long expires;
};

static DEFINE_MUTEX(delayed_bios_lock);

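/* Timer callback: hand the expired-bio scan off to the workqueue. */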
static void handle_delayed_timer(struct timer_list *t)
{
	struct delay_c *dc = from_timer(dc, t, delay_timer);

	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}

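/* (Re)arm the timer if @expires is earlier than the pending expiry. */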
static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
	mutex_lock(&dc->timer_lock);

	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
		mod_timer(&dc->delay_timer, expires);

	mutex_unlock(&dc->timer_lock);
}

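/* Resubmit a singly linked chain of bios to the block layer. */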
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

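/*
 * Collect all delayed bios whose delay has elapsed (or every delayed bio
 * if @flush_all is set) and return them as a chain; rearm the timer for
 * the earliest remaining expiry.
 */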
static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
	struct dm_delay_info *delayed, *next;
	unsigned long next_expires = 0;
	unsigned long start_timer = 0;
	struct bio_list flush_bios = { };

	mutex_lock(&delayed_bios_lock);
	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			struct bio *bio = dm_bio_from_per_bio_data(delayed,
						sizeof(struct dm_delay_info));
			list_del(&delayed->list);
			bio_list_add(&flush_bios, bio);
			delayed->class->ops--;
			continue;
		}

		if (!start_timer) {
			start_timer = 1;
			next_expires = delayed->expires;
		} else
			next_expires = min(next_expires, delayed->expires);
	}
	mutex_unlock(&delayed_bios_lock);

	if (start_timer)
		queue_timeout(dc, next_expires);

	return bio_list_get(&flush_bios);
}

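/* Workqueue handler: issue all bios whose delay has expired. */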
static void flush_expired_bios(struct work_struct *work)
{
	struct delay_c *dc;

	dc = container_of(work, struct delay_c, flush_expired_bios);
	flush_bios(flush_delayed_bios(dc, 0));
}

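/* Destructor: release devices and free the target context. */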
static void delay_dtr(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	/* The workqueue is NULL if the constructor failed before creating it. */
	if (dc->kdelayd_wq)
		destroy_workqueue(dc->kdelayd_wq);

	if (dc->read.dev)
		dm_put_device(ti, dc->read.dev);
	if (dc->write.dev)
		dm_put_device(ti, dc->write.dev);
	if (dc->flush.dev)
		dm_put_device(ti, dc->flush.dev);

	mutex_destroy(&dc->timer_lock);

	kfree(dc);
}

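/* Parse one <device> <offset> <delay> triplet into a delay class. */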
static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **argv)
{
	int ret;
	unsigned long long tmpll;
	char dummy;

	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		return -EINVAL;
	}
	c->start = tmpll;

	if (sscanf(argv[2], "%u%c", &c->delay, &dummy) != 1) {
		ti->error = "Invalid delay";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		return ret;
	}

	return 0;
}

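/*
 * Mapping parameters:
 *    <device> <offset> <delay>
 *    [<write_device> <write_offset> <write_delay>
 *     [<flush_device> <flush_offset> <flush_delay>]]
 *
 * With one parameter set, the same device and delay apply to reads,
 * writes and flushes. With two sets, the second applies to writes and
 * flushes. With three sets, flushes get their own class.
 * Offsets are specified in sectors, delays in milliseconds.
 */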
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct delay_c *dc;
	int ret;

	if (argc != 3 && argc != 6 && argc != 9) {
		ti->error = "Requires exactly 3, 6 or 9 arguments";
		return -EINVAL;
	}

	dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	ti->private = dc;
	timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
	INIT_LIST_HEAD(&dc->delayed_bios);
	mutex_init(&dc->timer_lock);
	atomic_set(&dc->may_delay, 1);
	dc->argc = argc;

	ret = delay_class_ctr(ti, &dc->read, argv);
	if (ret)
		goto bad;

	if (argc == 3) {
		ret = delay_class_ctr(ti, &dc->write, argv);
		if (ret)
			goto bad;
		ret = delay_class_ctr(ti, &dc->flush, argv);
		if (ret)
			goto bad;
		goto out;
	}

	ret = delay_class_ctr(ti, &dc->write, argv + 3);
	if (ret)
		goto bad;
	if (argc == 6) {
		ret = delay_class_ctr(ti, &dc->flush, argv + 3);
		if (ret)
			goto bad;
		goto out;
	}

	ret = delay_class_ctr(ti, &dc->flush, argv + 6);
	if (ret)
		goto bad;

out:
	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
	if (!dc->kdelayd_wq) {
		ret = -EINVAL;
		DMERR("Couldn't start kdelayd");
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_delay_info);
	return 0;

bad:
	delay_dtr(ti);
	return ret;
}

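/*
 * Queue a bio on the delayed list and arm the timer. Returns
 * DM_MAPIO_REMAPPED when no delay is configured (or delaying is
 * disabled) so the caller can submit it immediately.
 */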
static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
{
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

	if (!c->delay || !atomic_read(&dc->may_delay))
		return DM_MAPIO_REMAPPED;

	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	delayed->context = dc;
	delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);

	mutex_lock(&delayed_bios_lock);
	c->ops++;
	list_add_tail(&delayed->list, &dc->delayed_bios);
	mutex_unlock(&delayed_bios_lock);

	queue_timeout(dc, expires);

	return DM_MAPIO_SUBMITTED;
}

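/*
 * Before suspend, stop delaying new bios and flush out everything
 * currently queued; resume re-enables delaying.
 */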
static void delay_presuspend(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 0);
	del_timer_sync(&dc->delay_timer);
	flush_bios(flush_delayed_bios(dc, 1));
}

static void delay_resume(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 1);
}

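/* Pick the delay class for this bio, remap it and delay it if required. */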
static int delay_map(struct dm_target *ti, struct bio *bio)
{
	struct delay_c *dc = ti->private;
	struct delay_class *c;
	struct dm_delay_info *delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	if (bio_data_dir(bio) == WRITE) {
		if (unlikely(bio->bi_opf & REQ_PREFLUSH))
			c = &dc->flush;
		else
			c = &dc->write;
	} else {
		c = &dc->read;
	}
	delayed->class = c;
	bio_set_dev(bio, c->dev->bdev);
	if (bio_sectors(bio))
		bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	return delay_bio(dc, c, bio);
}

#define DMEMIT_DELAY_CLASS(c) \
	DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)

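/* Report per-class delayed-bio counts (INFO) or the table line (TABLE). */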
static void delay_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct delay_c *dc = ti->private;
	int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT_DELAY_CLASS(&dc->read);
		if (dc->argc >= 6) {
			DMEMIT(" ");
			DMEMIT_DELAY_CLASS(&dc->write);
		}
		if (dc->argc >= 9) {
			DMEMIT(" ");
			DMEMIT_DELAY_CLASS(&dc->flush);
		}
		break;
	}
}

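/* Report all underlying devices to the device-mapper core. */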
static int delay_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct delay_c *dc = ti->private;
	int ret = 0;

	ret = fn(ti, dc->read.dev, dc->read.start, ti->len, data);
	if (ret)
		goto out;
	ret = fn(ti, dc->write.dev, dc->write.start, ti->len, data);
	if (ret)
		goto out;
	ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);
	if (ret)
		goto out;

out:
	return ret;
}

350
351static struct target_type delay_target = {
352 .name = "delay",
353 .version = {1, 2, 1},
354 .features = DM_TARGET_PASSES_INTEGRITY,
355 .module = THIS_MODULE,
356 .ctr = delay_ctr,
357 .dtr = delay_dtr,
358 .map = delay_map,
359 .presuspend = delay_presuspend,
360 .resume = delay_resume,
361 .status = delay_status,
362 .iterate_devices = delay_iterate_devices,
363};
364
365static int __init dm_delay_init(void)
366{
367 int r;
368
369 r = dm_register_target(&delay_target);
370 if (r < 0) {
371 DMERR("register failed %d", r);
372 goto bad_register;
373 }
374
375 return 0;
376
377bad_register:
378 return r;
379}
380
381static void __exit dm_delay_exit(void)
382{
383 dm_unregister_target(&delay_target);
384}
385
/* Module hooks */
module_init(dm_delay_init);
module_exit(dm_delay_exit);

MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
MODULE_LICENSE("GPL");