/*
 * Multiple Devices driver for Linux -- "linear" personality.
 *
 * Linear (append) mode management functions: member devices are
 * concatenated end to end to form one large virtual device.
 */
#include <linux/module.h>

#include <linux/raid/md.h>
#include <linux/slab.h>
#include <linux/raid/linear.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY

/*
 * find which member device holds a particular sector of the array
 */
static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
{
	dev_info_t *hash;
	linear_conf_t *conf = mddev_to_conf(mddev);
	sector_t block = sector >> 1;

	/*
	 * sector_div(a, b) returns the remainder and sets a to a/b
	 */
	block >>= conf->preshift;
	(void)sector_div(block, conf->hash_spacing);
	hash = conf->hash_table[block];

	while ((sector>>1) >= (hash->size + hash->offset))
		hash++;
	return hash;
}

/**
 *	linear_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bio: the buffer head that's been built up so far
 *	@biovec: the request that could be merged to it.
 *
 *	Return the number of bytes we can accept at this offset.
 */
static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	dev_info_t *dev0;
	unsigned long maxsectors, bio_sectors = bio->bi_size >> 9;
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);

	dev0 = which_dev(mddev, sector);
	maxsectors = (dev0->size << 1) - (sector - (dev0->offset<<1));

	if (maxsectors < bio_sectors)
		maxsectors = 0;
	else
		maxsectors -= bio_sectors;

	if (maxsectors <= (PAGE_SIZE >> 9) && bio_sectors == 0)
		return biovec->bv_len;
	/* The bytes available at this offset could be really big,
	 * so we cap at 2^31 to avoid overflowing the int return value. */
	if (maxsectors > (1 << (31-9)))
		return 1<<31;
	return maxsectors << 9;
}
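
/*
 * linear_unplug -- pass an unplug notification down to every member
 * device's queue so that requests already queued there get dispatched.
 */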
static void linear_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	linear_conf_t *conf = mddev_to_conf(mddev);
	int i;

	for (i=0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
		blk_unplug(r_queue);
	}
}
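
/*
 * linear_congested -- the array counts as congested if the backing device
 * of any member is congested for the requested bits.
 */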
static int linear_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	linear_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}
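
/*
 * linear_conf -- build a configuration for @raid_disks member devices:
 * validate the members, record their sizes and cumulative offsets, and
 * construct the hash table that which_dev() uses to map a sector to a
 * member device.
 */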
static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
{
	linear_conf_t *conf;
	dev_info_t **table;
	mdk_rdev_t *rdev;
	int i, nb_zone, cnt;
	sector_t min_spacing;
	sector_t curr_offset;
	struct list_head *tmp;

	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
			GFP_KERNEL);
	if (!conf)
		return NULL;

	cnt = 0;
	conf->array_size = 0;

	ITERATE_RDEV(mddev,rdev,tmp) {
		int j = rdev->raid_disk;
		dev_info_t *disk = conf->disks + j;

		if (j < 0 || j > raid_disks || disk->rdev) {
			printk("linear: disk numbering problem. Aborting!\n");
			goto out;
		}

		disk->rdev = rdev;

		blk_queue_stack_limits(mddev->queue,
				       rdev->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		disk->size = rdev->size;
		conf->array_size += rdev->size;

		cnt++;
	}
	if (cnt != raid_disks) {
		printk("linear: not enough drives present. Aborting!\n");
		goto out;
	}

	min_spacing = conf->array_size;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));

	/* min_spacing is the minimum spacing that will fit the hash
	 * table in one PAGE.  This may be much smaller than needed.
	 * We find the smallest non-terminal set of consecutive devices
	 * that is larger than min_spacing and use the size of that as
	 * the actual spacing.
	 */
	conf->hash_spacing = conf->array_size;
	for (i=0; i < cnt-1 ; i++) {
		sector_t sz = 0;
		int j;
		for (j = i; j < cnt - 1 && sz < min_spacing; j++)
			sz += conf->disks[j].size;
		if (sz >= min_spacing && sz < conf->hash_spacing)
			conf->hash_spacing = sz;
	}

	/* hash_spacing may be too large for sector_div to work with,
	 * so we might need to pre-shift
	 */
	conf->preshift = 0;
	if (sizeof(sector_t) > sizeof(u32)) {
		sector_t space = conf->hash_spacing;
		while (space > (sector_t)(~(u32)0)) {
			space >>= 1;
			conf->preshift++;
		}
	}

	/*
	 * Work out how many hash-table zones we need: the array size
	 * divided by hash_spacing, rounded up (in pre-shifted units).
	 */
	{
		sector_t sz;
		unsigned round;
		unsigned long base;

		sz = conf->array_size >> conf->preshift;
		sz += 1;
		base = conf->hash_spacing >> conf->preshift;
		round = sector_div(sz, base);
		nb_zone = sz + (round ? 1 : 0);
	}
	BUG_ON(nb_zone > PAGE_SIZE / sizeof(struct dev_info *));

	conf->hash_table = kmalloc (sizeof (struct dev_info *) * nb_zone,
					GFP_KERNEL);
	if (!conf->hash_table)
		goto out;

	/*
	 * Here we generate the linear hash table.
	 * First calculate the device offsets.
	 */
	conf->disks[0].offset = 0;
	for (i = 1; i < raid_disks; i++)
		conf->disks[i].offset =
			conf->disks[i-1].offset +
			conf->disks[i-1].size;

	table = conf->hash_table;
	curr_offset = 0;
	i = 0;
	for (curr_offset = 0;
	     curr_offset < conf->array_size;
	     curr_offset += conf->hash_spacing) {

		while (i < raid_disks-1 &&
		       curr_offset >= conf->disks[i+1].offset)
			i++;

		*table++ = conf->disks + i;
	}

	if (conf->preshift) {
		conf->hash_spacing >>= conf->preshift;
		/* round hash_spacing up so that when we divide by it, we
		 * err on the side of too-low, which is safest.
		 */
		conf->hash_spacing++;
	}

	BUG_ON(table - conf->hash_table > nb_zone);

	return conf;

out:
	kfree(conf);
	return NULL;
}
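
/*
 * linear_run -- the personality's "run" method: build the configuration
 * and hook up the merge_bvec, unplug and congested callbacks on the
 * array's request queue.
 */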
static int linear_run (mddev_t *mddev)
{
	linear_conf_t *conf;

	conf = linear_conf(mddev, mddev->raid_disks);

	if (!conf)
		return 1;
	mddev->private = conf;
	mddev->array_size = conf->array_size;

	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
	mddev->queue->unplug_fn = linear_unplug;
	mddev->queue->backing_dev_info.congested_fn = linear_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;
	return 0;
}

static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
{
	/* Adding a drive to a linear array allows the array to grow.
	 * It is permitted if the new drive has a matching superblock
	 * already on it, with raid_disk equal to raid_disks.
	 * It is achieved by building a new configuration and swapping it
	 * in in place of the current one; the current one is not freed
	 * until the array is stopped, which avoids races with readers.
	 */
	linear_conf_t *newconf;

	if (rdev->saved_raid_disk != mddev->raid_disks)
		return -EINVAL;

	rdev->raid_disk = rdev->saved_raid_disk;

	newconf = linear_conf(mddev,mddev->raid_disks+1);

	if (!newconf)
		return -ENOMEM;

	newconf->prev = mddev_to_conf(mddev);
	mddev->private = newconf;
	mddev->raid_disks++;
	mddev->array_size = newconf->array_size;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	return 0;
}
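
/*
 * linear_stop -- tear the array down: quiesce the queue, then free every
 * configuration on the ->prev chain, including any left behind by
 * linear_add().
 */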
static int linear_stop (mddev_t *mddev)
{
	linear_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	do {
		linear_conf_t *t = conf->prev;
		kfree(conf->hash_table);
		kfree(conf);
		conf = t;
	} while (conf);

	return 0;
}
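
/*
 * linear_make_request -- remap a bio onto the member device that holds its
 * start sector, splitting it first if it crosses a device boundary.
 * A non-zero return means the remapped bio still has to be submitted.
 */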
static int linear_make_request (struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	mddev_t *mddev = q->queuedata;
	dev_info_t *tmp_dev;
	sector_t block;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

	tmp_dev = which_dev(mddev, bio->bi_sector);
	block = bio->bi_sector >> 1;
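
	/* device size and offset are in 1K blocks; bi_sector is in 512-byte sectors */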
	if (unlikely(block >= (tmp_dev->size + tmp_dev->offset)
		     || block < tmp_dev->offset)) {
		char b[BDEVNAME_SIZE];

		printk("linear_make_request: Block %llu out of bounds on "
			"dev %s size %llu offset %llu\n",
			(unsigned long long)block,
			bdevname(tmp_dev->rdev->bdev, b),
			(unsigned long long)tmp_dev->size,
			(unsigned long long)tmp_dev->offset);
		bio_io_error(bio);
		return 0;
	}
	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
		     (tmp_dev->offset + tmp_dev->size)<<1)) {
		/* This bio crosses a device boundary, so we have to
		 * split it.
		 */
		struct bio_pair *bp;
		bp = bio_split(bio, bio_split_pool,
			       ((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector);
		if (linear_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (linear_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);
		bio_pair_release(bp);
		return 0;
	}

	bio->bi_bdev = tmp_dev->rdev->bdev;
	bio->bi_sector = bio->bi_sector - (tmp_dev->offset << 1) + tmp_dev->rdev->data_offset;

	return 1;
}
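
/*
 * linear_status -- contribute this array's personality-specific part of its
 * /proc/mdstat line: the rounding (chunk) size.  The MD_DEBUG device map
 * below is compiled out by the #undef.
 */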
static void linear_status (struct seq_file *seq, mddev_t *mddev)
{

#undef MD_DEBUG
#ifdef MD_DEBUG
	int j;
	linear_conf_t *conf = mddev_to_conf(mddev);
	sector_t s = 0;

	seq_printf(seq, " ");
	for (j = 0; j < mddev->raid_disks; j++)
	{
		char b[BDEVNAME_SIZE];
		s += conf->smallest_size;
		seq_printf(seq, "[%s",
			   bdevname(conf->hash_table[j][0].rdev->bdev,b));

		while (s > conf->hash_table[j][0].offset +
			   conf->hash_table[j][0].size)
			seq_printf(seq, "/%s] ",
				   bdevname(conf->hash_table[j][1].rdev->bdev,b));
		else
			seq_printf(seq, "] ");
	}
	seq_printf(seq, "\n");
#endif
	seq_printf(seq, " %dk rounding", mddev->chunk_size/1024);
}
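
/*
 * The mdk_personality that registers "linear" mode (LEVEL_LINEAR) with the
 * md core; md calls these methods to run, stop, grow and report the array.
 */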
static struct mdk_personality linear_personality =
{
	.name		= "linear",
	.level		= LEVEL_LINEAR,
	.owner		= THIS_MODULE,
	.make_request	= linear_make_request,
	.run		= linear_run,
	.stop		= linear_stop,
	.status		= linear_status,
	.hot_add_disk	= linear_add,
};

static int __init linear_init (void)
{
	return register_md_personality (&linear_personality);
}

static void linear_exit (void)
{
	unregister_md_personality (&linear_personality);
}


module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-1");
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");