/*
 * fs/logfs/dev_bdev.c  - Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

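/*
 * request_complete - bi_end_io handler for sync_request().  All it does
 * is signal the on-stack completion that sync_request() is waiting on.
 */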
static void request_complete(struct bio *bio, int err)
{
        complete((struct completion *)bio->bi_private);
}

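/*
 * sync_request - perform a synchronous one-page read or write.
 *
 * Builds a single-vector bio on the stack covering the whole page (the
 * start sector is derived from page->index in 512-byte sector units),
 * submits it and sleeps until request_complete() fires.  Returns 0 on
 * success or -EIO if the bio did not end up uptodate.
 */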
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
        struct bio bio;
        struct bio_vec bio_vec;
        struct completion complete;

        bio_init(&bio);
        bio.bi_max_vecs = 1;
        bio.bi_io_vec = &bio_vec;
        bio_vec.bv_page = page;
        bio_vec.bv_len = PAGE_SIZE;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
        bio.bi_size = PAGE_SIZE;
        bio.bi_bdev = bdev;
        bio.bi_sector = page->index * (PAGE_SIZE >> 9);
        init_completion(&complete);
        bio.bi_private = &complete;
        bio.bi_end_io = request_complete;

        submit_bio(rw, &bio);
        wait_for_completion(&complete);
        return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}

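/*
 * bdev_readpage - ->readpage-style filler used by the device ops.
 *
 * Reads one page synchronously and maintains the Uptodate/Error page
 * flags accordingly.  The page arrives locked and is unlocked here in
 * both the success and the failure case.
 */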
static int bdev_readpage(void *_sb, struct page *page)
{
        struct super_block *sb = _sb;
        struct block_device *bdev = logfs_super(sb)->s_bdev;
        int err;

        err = sync_request(page, bdev, READ);
        if (err) {
                ClearPageUptodate(page);
                SetPageError(page);
        } else {
                SetPageUptodate(page);
                ClearPageError(page);
        }
        unlock_page(page);
        return err;
}

static DECLARE_WAIT_QUEUE_HEAD(wq);

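/*
 * writeseg_end_io - completion handler for segment writes.
 *
 * Walks the bio's vector backwards, ending writeback on and dropping
 * the reference to every page, then releases the bio.  The last
 * completing write wakes up anyone sleeping in bdev_sync().  I/O
 * errors are not handled yet; the BUG_ONs below document that debt.
 */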
static void writeseg_end_io(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);
        struct page *page;

        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
        BUG_ON(bio->bi_vcnt == 0);
        do {
                page = bvec->bv_page;
                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                end_page_writeback(page);
                page_cache_release(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
}

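/*
 * __bdev_writeseg - write nr_pages consecutive pages starting at ofs.
 *
 * Pages are looked up in the mapping inode's page cache, marked for
 * writeback and chained into a bio.  Because a bio cannot be split
 * once submitted, a full bio is sent off inside the loop and a fresh
 * one is allocated for the remaining pages.  Each submitted bio bumps
 * s_pending_writes so bdev_sync() can wait for completion.
 */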
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        struct bio *bio;
        struct page *page;
        unsigned int max_pages;
        int i;

        max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);

        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
                        bio->bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
                        bio->bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = writeseg_end_io;
                        atomic_inc(&super->s_pending_writes);
                        submit_bio(WRITE, bio);

                        ofs += i * PAGE_SIZE;
                        index += i;
                        nr_pages -= i;
                        i = 0;

                        bio = bio_alloc(GFP_NOFS, max_pages);
                        BUG_ON(!bio);
                }
                page = find_lock_page(mapping, index + i);
                BUG_ON(!page);
                bio->bi_io_vec[i].bv_page = page;
                bio->bi_io_vec[i].bv_len = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;

                BUG_ON(PageWriteback(page));
                set_page_writeback(page);
                unlock_page(page);
        }
        bio->bi_vcnt = nr_pages;
        bio->bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
        bio->bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = writeseg_end_io;
        atomic_inc(&super->s_pending_writes);
        submit_bio(WRITE, bio);
        return 0;
}

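/*
 * bdev_writeseg - round a byte range to page boundaries and write it.
 *
 * An unaligned head is absorbed by moving ofs back and growing len;
 * the tail is covered by PAGE_ALIGN().  A zero-length write is legal
 * and simply ignored.
 */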
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
        struct logfs_super *super = logfs_super(sb);
        int head;

        BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

        if (len == 0) {
                /* This can happen when the object fits perfectly into a
                 * segment, the segment gets written out by a sync and is
                 * subsequently closed.
                 */
                return;
        }
        head = ofs & (PAGE_SIZE - 1);
        if (head) {
                ofs -= head;
                len += head;
        }
        len = PAGE_ALIGN(len);
        __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

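/*
 * erase_end_io - completion handler for erase writes.
 *
 * Unlike writeseg_end_io() there are no cached pages to clean up, as
 * every vector points at the shared s_erase_page.  Only the pending-
 * write count needs to be balanced.
 */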
static void erase_end_io(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);

        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
        BUG_ON(bio->bi_vcnt == 0);
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
}

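/*
 * do_erase - emulate an erase by writing s_erase_page over the range.
 *
 * Block devices have no erase operation, so the segment is overwritten
 * with the pre-filled erase page instead.  The bio handling mirrors
 * __bdev_writeseg(), including the mid-loop submit when a bio is full.
 */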
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct bio *bio;
        unsigned int max_pages;
        int i;

        max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);

        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
                        bio->bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
                        bio->bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = erase_end_io;
                        atomic_inc(&super->s_pending_writes);
                        submit_bio(WRITE, bio);

                        ofs += i * PAGE_SIZE;
                        index += i;
                        nr_pages -= i;
                        i = 0;

                        bio = bio_alloc(GFP_NOFS, max_pages);
                        BUG_ON(!bio);
                }
                bio->bi_io_vec[i].bv_page = super->s_erase_page;
                bio->bi_io_vec[i].bv_len = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;
        }
        bio->bi_vcnt = nr_pages;
        bio->bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
        bio->bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = erase_end_io;
        atomic_inc(&super->s_pending_writes);
        submit_bio(WRITE, bio);
        return 0;
}

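/*
 * bdev_erase - erase a range of the device.
 *
 * Both to and len must be page-aligned.  Whether anything is actually
 * written depends on ensure_write; see the comment below on why the
 * journal needs real erases while the object store does not.
 */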
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
                int ensure_write)
{
        struct logfs_super *super = logfs_super(sb);

        BUG_ON(to & (PAGE_SIZE - 1));
        BUG_ON(len & (PAGE_SIZE - 1));

        if (super->s_flags & LOGFS_SB_FLAG_RO)
                return -EROFS;

        if (ensure_write) {
                /*
                 * Object store doesn't care whether erases happen or not.
                 * But for the journal they are required.  Otherwise a scan
                 * can find an old commit entry and assume it is the current
                 * one, travelling back in time.
                 */
                do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
        }

        return 0;
}

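/*
 * bdev_sync - wait until all writes submitted so far have completed.
 * Pairs with the wake_up() calls in the two end_io handlers above.
 */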
static void bdev_sync(struct super_block *sb)
{
        struct logfs_super *super = logfs_super(sb);

        wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

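/*
 * bdev_find_first_sb - read the superblock copy at the start of the
 * device, i.e. page 0 of the mapping inode, and report its offset.
 */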
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = bdev_readpage;

        *ofs = 0;
        return read_cache_page(mapping, 0, filler, sb);
}

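/*
 * bdev_find_last_sb - read the superblock copy at the end of the device.
 *
 * The position is the device size rounded down to a 4KiB boundary,
 * minus one 4KiB block, i.e. the start of the last full 4KiB block.
 */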
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = bdev_readpage;
        u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
        pgoff_t index = pos >> PAGE_SHIFT;

        *ofs = pos;
        return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
        struct block_device *bdev = logfs_super(sb)->s_bdev;

        /* Nothing special to do for block devices. */
        return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct logfs_super *s)
{
        blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

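/*
 * bdev_can_write_buf - report whether a partially written segment can
 * be continued.  Block devices always say no; presumably the write
 * buffer is flushed through bdev_writeseg() instead.
 */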
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
        return 0;
}

static const struct logfs_device_ops bd_devops = {
        .find_first_sb  = bdev_find_first_sb,
        .find_last_sb   = bdev_find_last_sb,
        .write_sb       = bdev_write_sb,
        .readpage       = bdev_readpage,
        .writeseg       = bdev_writeseg,
        .erase          = bdev_erase,
        .can_write_buf  = bdev_can_write_buf,
        .sync           = bdev_sync,
        .put_device     = bdev_put_device,
};

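/*
 * logfs_get_sb_bdev - open the named block device and wire up bd_devops.
 *
 * If the device turns out to be an mtdblock device, it is released
 * again and the mount is redirected to the native MTD backend via
 * logfs_get_sb_mtd().
 */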
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
                const char *devname)
{
        struct block_device *bdev;

        bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                  type);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
                int mtdnr = MINOR(bdev->bd_dev);
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
                return logfs_get_sb_mtd(p, mtdnr);
        }

        p->s_bdev = bdev;
        p->s_mtd = NULL;
        p->s_devops = &bd_devops;
        return 0;
}