linux/drivers/md/linear.c
/*
   linear.c : Multiple Devices driver for Linux
              Copyright (C) 1994-96 Marc ZYNGIER
              <zyngier@ufr-info-p7.ibp.fr> or
              <maz@gloups.fdn.fr>

   Linear mode management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include "md.h"
#include "linear.h"

/*
 * Find which member device holds a particular array offset.
 * Caller must hold rcu_read_lock().
 */
static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
{
        int lo, mid, hi;
        linear_conf_t *conf;

        lo = 0;
        hi = mddev->raid_disks - 1;
        conf = rcu_dereference(mddev->private);

        /*
         * Binary search over the cumulative end_sector values.
         */
        while (hi > lo) {
                mid = (hi + lo) / 2;
                if (sector < conf->disks[mid].end_sector)
                        hi = mid;
                else
                        lo = mid + 1;
        }

        return conf->disks + lo;
}
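
/*
 * Worked example (hypothetical geometry, not from this driver): members
 * of 100, 50 and 200 sectors give end_sector values of 100, 150 and 350.
 * Looking up sector 120, the search above finds 120 < disks[1].end_sector
 * (150) but not < disks[0].end_sector (100), so disks[1] holds the offset.
 */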

/**
 *      linear_mergeable_bvec -- tell bio layer if two requests can be merged
 *      @q: request queue
 *      @bvm: properties of the new bio
 *      @biovec: the bio_vec that is a candidate for merging
 *
 *      Return the number of bytes we can accept at this offset.
 */
static int linear_mergeable_bvec(struct request_queue *q,
                                 struct bvec_merge_data *bvm,
                                 struct bio_vec *biovec)
{
        mddev_t *mddev = q->queuedata;
        dev_info_t *dev0;
        unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);

        rcu_read_lock();
        dev0 = which_dev(mddev, sector);
        maxsectors = dev0->end_sector - sector;
        rcu_read_unlock();

        if (maxsectors < bio_sectors)
                maxsectors = 0;
        else
                maxsectors -= bio_sectors;

        /* Always accept at least one page for an otherwise empty bio,
         * so every bio can make progress; a request that then crosses
         * a device boundary is split in linear_make_request().
         */
        if (maxsectors <= (PAGE_SIZE >> 9) && bio_sectors == 0)
                return biovec->bv_len;
        /* The bytes available at this offset could be really big,
         * so we cap at 2^31 to avoid overflow.
         */
        if (maxsectors > (1 << (31 - 9)))
                return 1 << 31;
        return maxsectors << 9;
}

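/*
 * Push an unplug request down to the queue of every member device so
 * that any plugged (held-back) requests are dispatched.
 */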
static void linear_unplug(struct request_queue *q)
{
        mddev_t *mddev = q->queuedata;
        linear_conf_t *conf;
        int i;

        rcu_read_lock();
        conf = rcu_dereference(mddev->private);

        for (i = 0; i < mddev->raid_disks; i++) {
                struct request_queue *r_queue =
                        bdev_get_queue(conf->disks[i].rdev->bdev);

                blk_unplug(r_queue);
        }
        rcu_read_unlock();
}

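/*
 * The array is congested if md itself is congested or if the backing
 * device of any member is congested for the queried bits.
 */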
static int linear_congested(void *data, int bits)
{
        mddev_t *mddev = data;
        linear_conf_t *conf;
        int i, ret = 0;

        if (mddev_congested(mddev, bits))
                return 1;

        rcu_read_lock();
        conf = rcu_dereference(mddev->private);

        for (i = 0; i < mddev->raid_disks && !ret; i++) {
                struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);

                ret |= bdi_congested(&q->backing_dev_info, bits);
        }

        rcu_read_unlock();
        return ret;
}

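/*
 * Report the array size in sectors.  Linear does not support generic
 * reshape, so both the sectors and raid_disks arguments must be zero.
 */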
static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
        linear_conf_t *conf;
        sector_t array_sectors;

        rcu_read_lock();
        conf = rcu_dereference(mddev->private);
        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);
        array_sectors = conf->array_sectors;
        rcu_read_unlock();

        return array_sectors;
}

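/*
 * Build a configuration for 'raid_disks' members: validate the member
 * numbering, stack the block-queue limits, and precompute the
 * cumulative end_sector values that which_dev() binary-searches.
 */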
static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
{
        linear_conf_t *conf;
        mdk_rdev_t *rdev;
        int i, cnt;

        conf = kzalloc(sizeof(*conf) + raid_disks * sizeof(dev_info_t),
                       GFP_KERNEL);
        if (!conf)
                return NULL;

        cnt = 0;
        conf->array_sectors = 0;

        list_for_each_entry(rdev, &mddev->disks, same_set) {
                int j = rdev->raid_disk;
                dev_info_t *disk = conf->disks + j;
                sector_t sectors;

                if (j < 0 || j >= raid_disks || disk->rdev) {
                        printk(KERN_ERR "linear: disk numbering problem. Aborting!\n");
                        goto out;
                }

                disk->rdev = rdev;
                if (mddev->chunk_sectors) {
                        /* Round each member down to a whole number of chunks. */
                        sectors = rdev->sectors;
                        sector_div(sectors, mddev->chunk_sectors);
                        rdev->sectors = sectors * mddev->chunk_sectors;
                }

                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, so limit ->max_sectors to one PAGE, as
                 * a one page request is never in violation.
                 */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                conf->array_sectors += rdev->sectors;
                cnt++;
        }
        if (cnt != raid_disks) {
                printk(KERN_ERR "linear: not enough drives present. Aborting!\n");
                goto out;
        }

        /*
         * Here we calculate the cumulative device offsets: each member's
         * end_sector is the array offset just past its last sector.
         */
        conf->disks[0].end_sector = conf->disks[0].rdev->sectors;

        for (i = 1; i < raid_disks; i++)
                conf->disks[i].end_sector =
                        conf->disks[i-1].end_sector +
                        conf->disks[i].rdev->sectors;

        return conf;

out:
        kfree(conf);
        return NULL;
}

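/*
 * Assemble and start the array: build the configuration, publish the
 * array size, and wire up the merge, unplug and congestion callbacks.
 */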
static int linear_run(mddev_t *mddev)
{
        linear_conf_t *conf;

        if (md_check_no_bitmap(mddev))
                return -EINVAL;
        mddev->queue->queue_lock = &mddev->queue->__queue_lock;
        conf = linear_conf(mddev, mddev->raid_disks);

        if (!conf)
                return 1;
        mddev->private = conf;
        md_set_array_sectors(mddev, linear_size(mddev, 0, 0));

        blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
        mddev->queue->unplug_fn = linear_unplug;
        mddev->queue->backing_dev_info.congested_fn = linear_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
        md_integrity_register(mddev);
        return 0;
}

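/*
 * RCU callback: free a superseded configuration once every reader
 * that could still be traversing it has left its RCU read section.
 */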
static void free_conf(struct rcu_head *head)
{
        linear_conf_t *conf = container_of(head, linear_conf_t, rcu);
        kfree(conf);
}

static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
{
        /* Adding a drive to a linear array allows the array to grow.
         * It is permitted if the new drive has a matching superblock
         * already on it, with raid_disk equal to raid_disks.
         * It is achieved by creating a new linear_private_data structure
         * and swapping it in, in place of the current one.  The old
         * configuration is only freed after an RCU grace period (via
         * call_rcu() below), so readers still traversing it are safe.
         */
        linear_conf_t *newconf, *oldconf;

        if (rdev->saved_raid_disk != mddev->raid_disks)
                return -EINVAL;

        rdev->raid_disk = rdev->saved_raid_disk;

        newconf = linear_conf(mddev, mddev->raid_disks + 1);
        if (!newconf)
                return -ENOMEM;

        oldconf = rcu_dereference(mddev->private);
        mddev->raid_disks++;
        rcu_assign_pointer(mddev->private, newconf);
        md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk(mddev->gendisk);
        call_rcu(&oldconf->rcu, free_conf);
        return 0;
}

static int linear_stop(mddev_t *mddev)
{
        linear_conf_t *conf = mddev->private;

        /*
         * We do not require rcu protection here since
         * we hold reconfig_mutex for both linear_add and
         * linear_stop, so they cannot race.
         * We should make sure any old 'conf's are properly
         * freed though.
         */
        rcu_barrier();
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        kfree(conf);

        return 0;
}

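/*
 * Map a bio onto the member device that holds its starting sector.
 * Returns 1 when the remapped bio should be resubmitted by the
 * caller, 0 when it has been fully handled here (failed or split).
 */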
static int linear_make_request(struct request_queue *q, struct bio *bio)
{
        const int rw = bio_data_dir(bio);
        mddev_t *mddev = q->queuedata;
        dev_info_t *tmp_dev;
        sector_t start_sector;
        int cpu;

        /* Barriers are not supported; fail them so callers fall back. */
        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }

        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
                      bio_sectors(bio));
        part_stat_unlock();

        rcu_read_lock();
        tmp_dev = which_dev(mddev, bio->bi_sector);
        start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;

        if (unlikely(bio->bi_sector >= tmp_dev->end_sector
                     || bio->bi_sector < start_sector)) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR "linear_make_request: Sector %llu out of bounds on "
                        "dev %s: %llu sectors, offset %llu\n",
                        (unsigned long long)bio->bi_sector,
                        bdevname(tmp_dev->rdev->bdev, b),
                        (unsigned long long)tmp_dev->rdev->sectors,
                        (unsigned long long)start_sector);
                rcu_read_unlock();
                bio_io_error(bio);
                return 0;
        }
        if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
                     tmp_dev->end_sector)) {
                /* This bio crosses a device boundary, so we have to
                 * split it.
                 */
                struct bio_pair *bp;
                sector_t end_sector = tmp_dev->end_sector;

                rcu_read_unlock();

                bp = bio_split(bio, end_sector - bio->bi_sector);

                if (linear_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (linear_make_request(q, &bp->bio2))
                        generic_make_request(&bp->bio2);
                bio_pair_release(bp);
                return 0;
        }

        bio->bi_bdev = tmp_dev->rdev->bdev;
        bio->bi_sector = bio->bi_sector - start_sector
                + tmp_dev->rdev->data_offset;
        rcu_read_unlock();

        return 1;
}

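/*
 * /proc/mdstat status: chunk_sectors counts 512-byte sectors, so
 * dividing by two reports the rounding granularity in kibibytes.
 */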
static void linear_status(struct seq_file *seq, mddev_t *mddev)
{
        seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}

static struct mdk_personality linear_personality =
{
        .name           = "linear",
        .level          = LEVEL_LINEAR,
        .owner          = THIS_MODULE,
        .make_request   = linear_make_request,
        .run            = linear_run,
        .stop           = linear_stop,
        .status         = linear_status,
        .hot_add_disk   = linear_add,
        .size           = linear_size,
};

static int __init linear_init(void)
{
        return register_md_personality(&linear_personality);
}

static void linear_exit(void)
{
        unregister_md_personality(&linear_personality);
}

module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated */
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");
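
/*
 * Usage sketch (illustrative, from user space rather than this file):
 * a two-member linear array is typically created with mdadm, e.g.
 *
 *   mdadm --create /dev/md0 --level=linear --raid-devices=2 \
 *         /dev/sdb1 /dev/sdc1
 *
 * The member device names above are hypothetical placeholders.
 */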