/*
 * fs/logfs/dev_bdev.c  - Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

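/* PAGE_OFS(): byte offset of ofs within its containing page */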
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

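/*
 * bi_end_io callback for sync_request(): wake the task sleeping on the
 * completion stored in bio->bi_private.
 */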
static void request_complete(struct bio *bio, int err)
{
        complete((struct completion *)bio->bi_private);
}

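/*
 * Synchronously read or write a single page, using an on-stack bio and
 * sleeping until the request completes.
 */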
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
        struct bio bio;
        struct bio_vec bio_vec;
        struct completion complete;

        bio_init(&bio);
        bio.bi_max_vecs = 1;
        bio.bi_io_vec = &bio_vec;
        bio_vec.bv_page = page;
        bio_vec.bv_len = PAGE_SIZE;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
        bio.bi_size = PAGE_SIZE;
        bio.bi_bdev = bdev;
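        /* page->index counts PAGE_SIZE units; bi_sector wants 512-byte sectors */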
        bio.bi_sector = page->index * (PAGE_SIZE >> 9);
        init_completion(&complete);
        bio.bi_private = &complete;
        bio.bi_end_io = request_complete;

        submit_bio(rw, &bio);
        wait_for_completion(&complete);
        return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}

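/*
 * Read one page synchronously and set its uptodate/error bits to match
 * the result; doubles as the ->readpage-style filler passed to
 * read_cache_page() below.
 */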
static int bdev_readpage(void *_sb, struct page *page)
{
        struct super_block *sb = _sb;
        struct block_device *bdev = logfs_super(sb)->s_bdev;
        int err;

        err = sync_request(page, bdev, READ);
        if (err) {
                ClearPageUptodate(page);
                SetPageError(page);
        } else {
                SetPageUptodate(page);
                ClearPageError(page);
        }
        unlock_page(page);
        return err;
}

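/* bdev_sync() sleeps here until all in-flight writes have completed */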
static DECLARE_WAIT_QUEUE_HEAD(wq);

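/*
 * Completion handler for segment writes: end writeback on each page,
 * drop the page references taken in __bdev_writeseg() and wake up
 * bdev_sync() once the last pending write has finished.
 */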
static void writeseg_end_io(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec;
        int i;
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);

        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);

        bio_for_each_segment_all(bvec, bio, i) {
                end_page_writeback(bvec->bv_page);
                page_cache_release(bvec->bv_page);
        }
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
}

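/*
 * Write nr_pages consecutive pages from the mapping inode to the device,
 * starting at byte offset ofs.  Since the block layer cannot split bios,
 * a full bio is submitted and a fresh one allocated whenever max_pages is
 * reached; completion is handled asynchronously in writeseg_end_io().
 */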
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        struct bio *bio;
        struct page *page;
        unsigned int max_pages;
        int i;

        max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);

        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
                        bio->bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
                        bio->bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = writeseg_end_io;
                        atomic_inc(&super->s_pending_writes);
                        submit_bio(WRITE, bio);

                        ofs += i * PAGE_SIZE;
                        index += i;
                        nr_pages -= i;
                        i = 0;

                        bio = bio_alloc(GFP_NOFS, max_pages);
                        BUG_ON(!bio);
                }
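                /* find_lock_page() takes a page reference; it is dropped
                 * again in writeseg_end_io() */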
                page = find_lock_page(mapping, index + i);
                BUG_ON(!page);
                bio->bi_io_vec[i].bv_page = page;
                bio->bi_io_vec[i].bv_len = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;

                BUG_ON(PageWriteback(page));
                set_page_writeback(page);
                unlock_page(page);
        }
        bio->bi_vcnt = nr_pages;
        bio->bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
        bio->bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = writeseg_end_io;
        atomic_inc(&super->s_pending_writes);
        submit_bio(WRITE, bio);
        return 0;
}

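/*
 * Round the region out to full pages: pull the start back to a page
 * boundary, pad the length accordingly and hand off to __bdev_writeseg().
 */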
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
        struct logfs_super *super = logfs_super(sb);
        int head;

        BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

        if (len == 0) {
                /* This can happen when the object fit perfectly into a
                 * segment, the segment gets written per sync and subsequently
                 * closed.
                 */
                return;
        }
        head = ofs & (PAGE_SIZE - 1);
        if (head) {
                ofs -= head;
                len += head;
        }
        len = PAGE_ALIGN(len);
        __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

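/*
 * Completion handler for erase bios.  Unlike writeseg_end_io() there are
 * no per-page references to drop, since every bio_vec maps the shared
 * s_erase_page.
 */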
static void erase_end_io(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);

        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
        BUG_ON(bio->bi_vcnt == 0);
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
}

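/*
 * Block devices have no erase operation, so emulate one by writing the
 * contents of s_erase_page over the whole range.  The bio-filling logic
 * mirrors __bdev_writeseg().
 */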
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct bio *bio;
        unsigned int max_pages;
        int i;

        max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);

        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
                        bio->bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
                        bio->bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = erase_end_io;
                        atomic_inc(&super->s_pending_writes);
                        submit_bio(WRITE, bio);

                        ofs += i * PAGE_SIZE;
                        index += i;
                        nr_pages -= i;
                        i = 0;

                        bio = bio_alloc(GFP_NOFS, max_pages);
                        BUG_ON(!bio);
                }
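                /* Every bio_vec maps the same shared erase page */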
                bio->bi_io_vec[i].bv_page = super->s_erase_page;
                bio->bi_io_vec[i].bv_len = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;
        }
        bio->bi_vcnt = nr_pages;
        bio->bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
        bio->bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = erase_end_io;
        atomic_inc(&super->s_pending_writes);
        submit_bio(WRITE, bio);
        return 0;
}

static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
                int ensure_write)
{
        struct logfs_super *super = logfs_super(sb);

        BUG_ON(to & (PAGE_SIZE - 1));
        BUG_ON(len & (PAGE_SIZE - 1));

        if (super->s_flags & LOGFS_SB_FLAG_RO)
                return -EROFS;

        if (ensure_write) {
                /*
                 * Object store doesn't care whether erases happen or not.
                 * But for the journal they are required.  Otherwise a scan
                 * can find an old commit entry and assume it is the current
                 * one, travelling back in time.
                 */
                do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
        }

        return 0;
}

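/* Wait until every write submitted by this backend has completed */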
static void bdev_sync(struct super_block *sb)
{
        struct logfs_super *super = logfs_super(sb);

        wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

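/* On block devices the first superblock lives in the first page (offset 0) */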
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = bdev_readpage;

        *ofs = 0;
        return read_cache_page(mapping, 0, filler, sb);
}

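/*
 * The last superblock occupies the last complete 4KiB block of the
 * device.  E.g. for a 10000-byte device: 10000 & ~0xfff = 8192, minus
 * 0x1000 gives pos = 4096, i.e. the block spanning bytes 4096-8191.
 */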
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = bdev_readpage;
        u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
        pgoff_t index = pos >> PAGE_SHIFT;

        *ofs = pos;
        return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
        struct block_device *bdev = logfs_super(sb)->s_bdev;

        /* Nothing special to do for block devices. */
        return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct logfs_super *s)
{
        blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

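/*
 * Block devices can rewrite any sector in place, so continuing to write
 * at an arbitrary offset is never a problem.
 */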
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
        return 0;
}

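/* Device operations for logfs on top of a plain block device */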
static const struct logfs_device_ops bd_devops = {
        .find_first_sb  = bdev_find_first_sb,
        .find_last_sb   = bdev_find_last_sb,
        .write_sb       = bdev_write_sb,
        .readpage       = bdev_readpage,
        .writeseg       = bdev_writeseg,
        .erase          = bdev_erase,
        .can_write_buf  = bdev_can_write_buf,
        .sync           = bdev_sync,
        .put_device     = bdev_put_device,
};

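/*
 * Open the named device for exclusive access.  If it turns out to be an
 * mtdblock device, release it again and hand over to the native MTD
 * backend instead of going through the block translation layer.
 */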
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
                const char *devname)
{
        struct block_device *bdev;

        bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                  type);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
                int mtdnr = MINOR(bdev->bd_dev);
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
                return logfs_get_sb_mtd(p, mtdnr);
        }

        p->s_bdev = bdev;
        p->s_mtd = NULL;
        p->s_devops = &bd_devops;
        return 0;
}