linux/drivers/md/linear.c
/*
   linear.c : Multiple Devices driver for Linux
              Copyright (C) 1994-96 Marc ZYNGIER
              <zyngier@ufr-info-p7.ibp.fr> or
              <maz@gloups.fdn.fr>

   Linear mode management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "linear.h"

/*
 * find which device holds a particular offset
 */
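/*
 * Worked example (illustrative sizes): with three members of 100, 200 and
 * 50 sectors, disks[].end_sector holds the running totals {100, 300, 350}.
 * A lookup for sector 250 returns disks[1], the first entry whose
 * end_sector is greater than the requested sector.
 */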
static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
{
        int lo, mid, hi;
        struct linear_conf *conf;

        lo = 0;
        hi = mddev->raid_disks - 1;
        conf = rcu_dereference(mddev->private);

        /*
         * Binary Search
         */

        while (hi > lo) {

                mid = (hi + lo) / 2;
                if (sector < conf->disks[mid].end_sector)
                        hi = mid;
                else
                        lo = mid + 1;
        }

        return conf->disks + lo;
}

/**
 *      linear_mergeable_bvec -- tell bio layer if two requests can be merged
 *      @q: request queue
 *      @bvm: properties of the new bio
 *      @biovec: the request that could be merged to it.
 *
 *      Return the number of bytes we can accept at this offset
 */
static int linear_mergeable_bvec(struct request_queue *q,
                                 struct bvec_merge_data *bvm,
                                 struct bio_vec *biovec)
{
        struct mddev *mddev = q->queuedata;
        struct dev_info *dev0;
        unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int maxbytes = biovec->bv_len;
        struct request_queue *subq;

        rcu_read_lock();
        dev0 = which_dev(mddev, sector);
        maxsectors = dev0->end_sector - sector;
        subq = bdev_get_queue(dev0->rdev->bdev);
        if (subq->merge_bvec_fn) {
                bvm->bi_bdev = dev0->rdev->bdev;
                bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors;
                maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
                                                             biovec));
        }
        rcu_read_unlock();

        if (maxsectors < bio_sectors)
                maxsectors = 0;
        else
                maxsectors -= bio_sectors;

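        /*
         * Special case: if the bio is still empty, allow a full page even
         * when fewer sectors than that remain before the member boundary;
         * linear_make_request() will split a bio that ends up crossing the
         * boundary.
         */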
        if (maxsectors <= (PAGE_SIZE >> 9) && bio_sectors == 0)
                return maxbytes;

        if (maxsectors > (maxbytes >> 9))
                return maxbytes;
        else
                return maxsectors << 9;
}

static int linear_congested(void *data, int bits)
{
        struct mddev *mddev = data;
        struct linear_conf *conf;
        int i, ret = 0;

        if (mddev_congested(mddev, bits))
                return 1;

        rcu_read_lock();
        conf = rcu_dereference(mddev->private);

        for (i = 0; i < mddev->raid_disks && !ret; i++) {
                struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
                ret |= bdi_congested(&q->backing_dev_info, bits);
        }

        rcu_read_unlock();
        return ret;
}

static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
        struct linear_conf *conf;
        sector_t array_sectors;

        rcu_read_lock();
        conf = rcu_dereference(mddev->private);
        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);
        array_sectors = conf->array_sectors;
        rcu_read_unlock();

        return array_sectors;
}

static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
{
        struct linear_conf *conf;
        struct md_rdev *rdev;
        int i, cnt;
        bool discard_supported = false;

        conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info),
                        GFP_KERNEL);
        if (!conf)
                return NULL;

        cnt = 0;
        conf->array_sectors = 0;

        rdev_for_each(rdev, mddev) {
                int j = rdev->raid_disk;
                struct dev_info *disk = conf->disks + j;
                sector_t sectors;

                if (j < 0 || j >= raid_disks || disk->rdev) {
                        printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
                               mdname(mddev));
                        goto out;
                }

                disk->rdev = rdev;
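                /*
                 * If a rounding size was configured, trim each member down
                 * to a whole multiple of it; linear_status() reports this
                 * as "%dk rounding".
                 */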
                if (mddev->chunk_sectors) {
                        sectors = rdev->sectors;
                        sector_div(sectors, mddev->chunk_sectors);
                        rdev->sectors = sectors * mddev->chunk_sectors;
                }

                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);

                conf->array_sectors += rdev->sectors;
                cnt++;

                if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
                        discard_supported = true;
        }
        if (cnt != raid_disks) {
                printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
                       mdname(mddev));
                goto out;
        }

        if (!discard_supported)
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        else
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);

        /*
         * Here we calculate the device offsets.
         */
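        /*
         * end_sector is cumulative: disks[i].end_sector is the first array
         * sector beyond member i, which is what which_dev() compares against.
         */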
        conf->disks[0].end_sector = conf->disks[0].rdev->sectors;

        for (i = 1; i < raid_disks; i++)
                conf->disks[i].end_sector =
                        conf->disks[i-1].end_sector +
                        conf->disks[i].rdev->sectors;

        return conf;

out:
        kfree(conf);
        return NULL;
}

static int linear_run (struct mddev *mddev)
{
        struct linear_conf *conf;
        int ret;

        if (md_check_no_bitmap(mddev))
                return -EINVAL;
        conf = linear_conf(mddev, mddev->raid_disks);

        if (!conf)
                return 1;
        mddev->private = conf;
        md_set_array_sectors(mddev, linear_size(mddev, 0, 0));

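        /*
         * Hook the member-aware callbacks into the array's queue:
         * linear_mergeable_bvec() tells the block layer how much of a bio
         * a member can take before it would cross a boundary, and
         * linear_congested() reports whether any member device is congested.
         */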
        blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
        mddev->queue->backing_dev_info.congested_fn = linear_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;

        ret = md_integrity_register(mddev);
        if (ret) {
                kfree(conf);
                mddev->private = NULL;
        }
        return ret;
}

static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
{
        /* Adding a drive to a linear array allows the array to grow.
         * It is permitted if the new drive has a matching superblock
         * already on it, with raid_disk equal to raid_disks.
         * It is achieved by creating a new linear_conf structure
         * and swapping it in place of the current one.
         * The current one is not freed until no reader can still be
         * using it (see the kfree_rcu() below), which avoids races.
         */
        struct linear_conf *newconf, *oldconf;

        if (rdev->saved_raid_disk != mddev->raid_disks)
                return -EINVAL;

        rdev->raid_disk = rdev->saved_raid_disk;
        rdev->saved_raid_disk = -1;

        newconf = linear_conf(mddev, mddev->raid_disks+1);

        if (!newconf)
                return -ENOMEM;

        oldconf = rcu_dereference_protected(mddev->private,
                                            lockdep_is_held(
                                                    &mddev->reconfig_mutex));
        mddev->raid_disks++;
        rcu_assign_pointer(mddev->private, newconf);
        md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk(mddev->gendisk);
        kfree_rcu(oldconf, rcu);
        return 0;
}

static int linear_stop (struct mddev *mddev)
{
        struct linear_conf *conf =
                rcu_dereference_protected(mddev->private,
                                          lockdep_is_held(
                                                  &mddev->reconfig_mutex));

        /*
         * We do not require rcu protection here since
         * we hold reconfig_mutex for both linear_add and
         * linear_stop, so they cannot race.
         * We should make sure any old 'conf's are properly
         * freed though.
         */
        rcu_barrier();
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        kfree(conf);
        mddev->private = NULL;

        return 0;
}

static void linear_make_request(struct mddev *mddev, struct bio *bio)
{
        struct dev_info *tmp_dev;
        sector_t start_sector;

        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }

        rcu_read_lock();
        tmp_dev = which_dev(mddev, bio->bi_sector);
        start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;

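        /*
         * start_sector is the first array sector mapped onto this member.
         * A bio that starts outside [start_sector, end_sector) does not map
         * onto the member which_dev() returned, so fail it rather than
         * remap it.
         */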
        if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
                     || (bio->bi_sector < start_sector))) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR
                       "md/linear:%s: make_request: Sector %llu out of bounds on "
                       "dev %s: %llu sectors, offset %llu\n",
                       mdname(mddev),
                       (unsigned long long)bio->bi_sector,
                       bdevname(tmp_dev->rdev->bdev, b),
                       (unsigned long long)tmp_dev->rdev->sectors,
                       (unsigned long long)start_sector);
                rcu_read_unlock();
                bio_io_error(bio);
                return;
        }
        if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
                /* This bio crosses a device boundary, so we have to
                 * split it.
                 */
                struct bio_pair *bp;
                sector_t end_sector = tmp_dev->end_sector;

                rcu_read_unlock();

                bp = bio_split(bio, end_sector - bio->bi_sector);

                linear_make_request(mddev, &bp->bio1);
                linear_make_request(mddev, &bp->bio2);
                bio_pair_release(bp);
                return;
        }

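        /*
         * Remap: convert the array-relative sector into an offset within
         * this member, then add the member's data_offset to skip its
         * superblock/metadata area.
         */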
        bio->bi_bdev = tmp_dev->rdev->bdev;
        bio->bi_sector = bio->bi_sector - start_sector
                + tmp_dev->rdev->data_offset;
        rcu_read_unlock();

        if (unlikely((bio->bi_rw & REQ_DISCARD) &&
                     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
                /* Just ignore it */
                bio_endio(bio, 0);
                return;
        }

        generic_make_request(bio);
}

static void linear_status (struct seq_file *seq, struct mddev *mddev)
{
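        /* chunk_sectors is in 512-byte sectors, so dividing by 2 reports
         * the rounding factor in KiB. */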
        seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}

static struct md_personality linear_personality =
{
        .name           = "linear",
        .level          = LEVEL_LINEAR,
        .owner          = THIS_MODULE,
        .make_request   = linear_make_request,
        .run            = linear_run,
        .stop           = linear_stop,
        .status         = linear_status,
        .hot_add_disk   = linear_add,
        .size           = linear_size,
};

static int __init linear_init (void)
{
        return register_md_personality (&linear_personality);
}

static void linear_exit (void)
{
        unregister_md_personality (&linear_personality);
}

module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Linear device concatenation personality for MD");
MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated */
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");