linux/block/blk-lib.c
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	if (bio->bi_private)
		complete(bio->bi_private);

	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is a multiple of the discard
	 * granularity.  The mask below rounds down and assumes the
	 * granularity (in sectors) is a power of two, e.g. a 512 KiB
	 * granularity gives disc_sects = 1024 and clears the low 10 bits.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		max_discard_sectors &= ~(disc_sects - 1);
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	while (nr_sects && !ret) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &wait;

		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		bio_get(bio);
		submit_bio(type, bio);

		wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

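/*
 * Usage sketch (illustrative only, not part of blk-lib.c): a caller such
 * as an ioctl handler or a filesystem might invoke blkdev_issue_discard()
 * roughly as below.  The helper name and its byte-based arguments are
 * hypothetical; GFP_KERNEL assumes a sleepable context.
 */
static inline int example_discard_range(struct block_device *bdev,
					u64 start, u64 len)
{
	/* Convert byte offsets to 512-byte sectors. */
	sector_t sector = start >> 9;
	sector_t nr_sects = len >> 9;

	/*
	 * Pass BLKDEV_DISCARD_SECURE in flags instead of 0 to request a
	 * secure discard (only honoured when the queue supports it).
	 */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
}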

/*
 * Helper for submitting a batch of bios and waiting for all of them:
 * @done counts the outstanding bios plus one reference held by the
 * submitter, @flags carries the BIO_UPTODATE/BIO_EOPNOTSUPP state for
 * the whole batch, and @wait is completed when the last bio finishes.
 */
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bb->flags);
		else
			clear_bit(BIO_UPTODATE, &bb->flags);
	}
	if (bb)
		if (atomic_dec_and_test(&bb->done))
			complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 *  Send a barrier at the beginning and at the end if requested.  This
 *  guarantees correct request ordering.  An empty barrier allows us to
 *  avoid a post-queue flush.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);	/* one reference for the submitter */
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

submit:
	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);

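/*
 * Usage sketch (illustrative only, not part of blk-lib.c): zeroing a byte
 * range on a block device might look like the helper below.  The helper
 * name and byte-based arguments are hypothetical; GFP_NOFS is an assumed
 * allocation mask suitable for filesystem context.
 */
static inline int example_zero_range(struct block_device *bdev,
				     u64 start, u64 len)
{
	/* blkdev_issue_zeroout() works in 512-byte sectors. */
	return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_NOFS);
}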