linux/fs/hpfs/file.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/file.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  file VFS functions
 */

#include "hpfs_fn.h"
#include <linux/mpage.h>
#include <linux/iomap.h>
#include <linux/fiemap.h>

#define BLOCKS(size) (((size) + 511) >> 9)

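/*
 * Write the in-memory inode back to disk on release if it was changed.
 * All HPFS metadata access is serialized by one per-superblock lock.
 */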
static int hpfs_file_release(struct inode *inode, struct file *file)
{
        hpfs_lock(inode->i_sb);
        hpfs_write_if_changed(inode);
        hpfs_unlock(inode->i_sb);
        return 0;
}

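/*
 * fsync: write and wait on the data pages in the given range, then sync
 * the block device so that metadata written through the bdev mapping
 * reaches the disk as well.
 */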
int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        ret = file_write_and_wait_range(file, start, end);
        if (ret)
                return ret;
        return sync_blockdev(inode->i_sb->s_bdev);
}

/*
 * generic_file_read often calls bmap with non-existing sector,
 * so we must ignore such errors.
 */

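/*
 * Map a file sector to a disk sector.  The most recently used run is
 * cached in hpfs_inode_info (i_file_sec/i_disk_sec/i_n_secs); on a cache
 * miss the fnode's allocation B+tree is searched, which refreshes the
 * cached run.  Returns 0 for holes and errors, otherwise the disk sector,
 * with the number of contiguous sectors stored in *n_secs.
 */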
static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_secs)
{
        struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
        unsigned n, disk_secno;
        struct fnode *fnode;
        struct buffer_head *bh;
        if (BLOCKS(hpfs_i(inode)->mmu_private) <= file_secno) return 0;
        n = file_secno - hpfs_inode->i_file_sec;
        if (n < hpfs_inode->i_n_secs) {
                *n_secs = hpfs_inode->i_n_secs - n;
                return hpfs_inode->i_disk_sec + n;
        }
        if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0;
        disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh);
        if (disk_secno == -1) return 0;
        if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0;
        n = file_secno - hpfs_inode->i_file_sec;
        if (n < hpfs_inode->i_n_secs) {
                *n_secs = hpfs_inode->i_n_secs - n;
                return hpfs_inode->i_disk_sec + n;
        }
        *n_secs = 1;
        return disk_secno;
}

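/*
 * Truncate the allocation B+tree to the current i_size and write the
 * fnode back.  The caller must already hold the hpfs lock.
 */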
void hpfs_truncate(struct inode *i)
{
        if (IS_IMMUTABLE(i)) return /*-EPERM*/;
        hpfs_lock_assert(i->i_sb);

        hpfs_i(i)->i_n_secs = 0;
        i->i_blocks = 1 + ((i->i_size + 511) >> 9);
        hpfs_i(i)->mmu_private = i->i_size;
        hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9));
        hpfs_write_inode(i);
        hpfs_i(i)->i_n_secs = 0;
}

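/*
 * get_block callback for the buffer-head based I/O paths.  Existing
 * sectors are mapped (redirected through the hotfix map where needed);
 * with create set, the file may only grow by one sector at a time,
 * appended exactly at the current end of allocated data (mmu_private).
 */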
static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
        int r;
        secno s;
        unsigned n_secs;
        hpfs_lock(inode->i_sb);
        s = hpfs_bmap(inode, iblock, &n_secs);
        if (s) {
                if (bh_result->b_size >> 9 < n_secs)
                        n_secs = bh_result->b_size >> 9;
                n_secs = hpfs_search_hotfix_map_for_range(inode->i_sb, s, n_secs);
                if (unlikely(!n_secs)) {
                        s = hpfs_search_hotfix_map(inode->i_sb, s);
                        n_secs = 1;
                }
                map_bh(bh_result, inode->i_sb, s);
                bh_result->b_size = n_secs << 9;
                goto ret_0;
        }
        if (!create) goto ret_0;
        if (iblock<<9 != hpfs_i(inode)->mmu_private) {
                BUG();
                r = -EIO;
                goto ret_r;
        }
        if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) {
                hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1);
                r = -ENOSPC;
                goto ret_r;
        }
        inode->i_blocks++;
        hpfs_i(inode)->mmu_private += 512;
        set_buffer_new(bh_result);
        map_bh(bh_result, inode->i_sb, hpfs_search_hotfix_map(inode->i_sb, s));
        ret_0:
        r = 0;
        ret_r:
        hpfs_unlock(inode->i_sb);
        return r;
}

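/*
 * Read-only iomap implementation, used only by hpfs_fiemap() below.
 * Reports merged mapped extents (again going through the hotfix map)
 * and block-sized holes.
 */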
static int hpfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
        struct super_block *sb = inode->i_sb;
        unsigned int blkbits = inode->i_blkbits;
        unsigned int n_secs;
        secno s;

        if (WARN_ON_ONCE(flags & (IOMAP_WRITE | IOMAP_ZERO)))
                return -EINVAL;

        iomap->bdev = inode->i_sb->s_bdev;
        iomap->offset = offset;

        hpfs_lock(sb);
        s = hpfs_bmap(inode, offset >> blkbits, &n_secs);
        if (s) {
                n_secs = hpfs_search_hotfix_map_for_range(sb, s,
                                min_t(loff_t, n_secs, length));
                if (unlikely(!n_secs)) {
                        s = hpfs_search_hotfix_map(sb, s);
                        n_secs = 1;
                }
                iomap->type = IOMAP_MAPPED;
                iomap->flags = IOMAP_F_MERGED;
                iomap->addr = (u64)s << blkbits;
                iomap->length = (u64)n_secs << blkbits;
        } else {
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->length = 1 << blkbits;
        }

        hpfs_unlock(sb);
        return 0;
}

static const struct iomap_ops hpfs_iomap_ops = {
        .iomap_begin            = hpfs_iomap_begin,
};

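/*
 * The buffered I/O paths are all built on hpfs_get_block() and the
 * generic mpage/block helpers.
 */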
static int hpfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, hpfs_get_block);
}

static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, hpfs_get_block, wbc);
}

static void hpfs_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, hpfs_get_block);
}

static int hpfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, hpfs_get_block);
}

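/*
 * A write did not go through completely: drop the page cache and the
 * blocks that were instantiated beyond i_size.
 */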
static void hpfs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        hpfs_lock(inode->i_sb);

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                hpfs_truncate(inode);
        }

        hpfs_unlock(inode->i_sb);
}

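/*
 * cont_write_begin() zero-fills the gap between the end of the allocated
 * data (mmu_private) and the write position, so hpfs_get_block() only
 * ever has to append sectors at the end of the file.
 */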
static int hpfs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        int ret;

        *pagep = NULL;
        ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                hpfs_get_block,
                                &hpfs_i(mapping->host)->mmu_private);
        if (unlikely(ret))
                hpfs_write_failed(mapping, pos + len);

        return ret;
}

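/*
 * On a short copy, undo the blocks allocated past i_size; on success,
 * mark the inode dirty so the fnode is written back on close at the
 * latest.
 */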
static int hpfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *pagep, void *fsdata)
{
        struct inode *inode = mapping->host;
        int err;
        err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
        if (err < len)
                hpfs_write_failed(mapping, pos + len);
        if (!(err < 0)) {
                /* make sure we write it on close, if not earlier */
                hpfs_lock(inode->i_sb);
                hpfs_i(inode)->i_dirty = 1;
                hpfs_unlock(inode->i_sb);
        }
        return err;
}

static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, hpfs_get_block);
}

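/*
 * FIEMAP is implemented on top of the read-only iomap ops above; the
 * requested length is clamped to the file size.
 */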
static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len)
{
        int ret;

        inode_lock(inode);
        len = min_t(u64, len, i_size_read(inode));
        ret = iomap_fiemap(inode, fieinfo, start, len, &hpfs_iomap_ops);
        inode_unlock(inode);

        return ret;
}

const struct address_space_operations hpfs_aops = {
        .set_page_dirty = __set_page_dirty_buffers,
        .readpage = hpfs_readpage,
        .writepage = hpfs_writepage,
        .readahead = hpfs_readahead,
        .writepages = hpfs_writepages,
        .write_begin = hpfs_write_begin,
        .write_end = hpfs_write_end,
        .bmap = _hpfs_bmap
};

const struct file_operations hpfs_file_ops =
{
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .release        = hpfs_file_release,
        .fsync          = hpfs_file_fsync,
        .splice_read    = generic_file_splice_read,
        .unlocked_ioctl = hpfs_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
};

const struct inode_operations hpfs_file_iops =
{
        .setattr        = hpfs_setattr,
        .fiemap         = hpfs_fiemap,
};