// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/buffer.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "hpfs_fn.h"

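/*
 * HPFS keeps a hotfix map: a list of bad sectors that the filesystem has
 * remapped to spare sectors. Translate a sector number through that map,
 * returning the replacement sector if one exists.
 */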
secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] == sec) {
			return sbi->hotfix_to[i];
		}
	}
	return sec;
}

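/*
 * Clamp a range of n sectors starting at sec so that it stops before the
 * first hotfixed sector, i.e. return the length of the leading run that
 * needs no remapping.
 */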
unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
			n = sbi->hotfix_from[i] - sec;
		}
	}
	return n;
}

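/*
 * Issue readahead for n sectors starting at secno. This is best effort:
 * ranges that touch a hotfixed sector or run past the end of the
 * filesystem are simply not prefetched.
 */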
void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
{
	struct buffer_head *bh;
	struct blk_plug plug;

	if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
		return;

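	/*
	 * Reading ahead across a hotfixed sector would fetch the wrong
	 * blocks; skip the whole prefetch in that case.
	 */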
	if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
		return;

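	/*
	 * If the first sector is already cached and uptodate, assume the
	 * rest of the range is too and avoid redundant readahead.
	 */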
	bh = sb_find_get_block(s, secno);
	if (bh) {
		if (buffer_uptodate(bh)) {
			brelse(bh);
			return;
		}
		brelse(bh);
	}

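	/*
	 * Plug the queue so the readahead requests below are submitted to
	 * the block layer as one batch.
	 */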
	blk_start_plug(&plug);
	while (n > 0) {
		if (unlikely(secno >= hpfs_sb(s)->sb_fs_size))
			break;
		sb_breadahead(s, secno);
		secno++;
		n--;
	}
	blk_finish_plug(&plug);
}

/*
 * Map a sector into a buffer: return a pointer to the sector's data and
 * store the buffer head in *bhp. Readahead of "ahead" sectors is started
 * as a side effect.
 */

void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
		 int ahead)
{
	struct buffer_head *bh;

	hpfs_lock_assert(s);

	hpfs_prefetch_sectors(s, secno, ahead);

	cond_resched();

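	/*
	 * Read through the hotfix map so a remapped sector is fetched from
	 * its replacement location.
	 */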
	*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
	if (bh != NULL)
		return bh->b_data;
	pr_err("%s(): read error\n", __func__);
	return NULL;
}

/* Like hpfs_map_sector but don't read anything */

void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;

	hpfs_lock_assert(s);

	cond_resched();

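	/*
	 * No read is issued: wait out any I/O still in flight, then mark
	 * the buffer uptodate because the caller will overwrite the whole
	 * sector anyway.
	 */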
	if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
		if (!buffer_uptodate(bh))
			wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		pr_err("%s(): getblk failed\n", __func__);
		return NULL;
	}
}

/*
 * Map 4 consecutive sectors into a quad buffer and return a pointer to the
 * 2048 bytes of data; the four buffer heads are stored in qbh.
 */

void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
		   int ahead)
{
	char *data;

	hpfs_lock_assert(s);

	cond_resched();

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	hpfs_prefetch_sectors(s, secno, 4 + ahead);

	if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
	if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
	if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
	if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;

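	/*
	 * If the four 512-byte buffers are contiguous in memory (typically
	 * they live in the same page), return a direct pointer into the
	 * page cache; otherwise fall back to a 2048-byte bounce buffer.
	 */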
	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	qbh->data = data = kmalloc(2048, GFP_NOFS);
	if (!data) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}

	memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
	memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
	memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
	memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);

	return data;

 bail4:
	brelse(qbh->bh[3]);
 bail3:
	brelse(qbh->bh[2]);
 bail2:
	brelse(qbh->bh[1]);
 bail1:
	brelse(qbh->bh[0]);
 bail0:
	return NULL;
}

/* Like hpfs_map_4sectors, but don't read anything from disk */

void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
			  struct quad_buffer_head *qbh)
{
	cond_resched();

	hpfs_lock_assert(s);

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
	if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
	if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
	if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;

	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

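	/*
	 * Unlike hpfs_map_4sectors, nothing is copied into the bounce
	 * buffer here: the caller is expected to fill all 2048 bytes, and
	 * hpfs_mark_4buffers_dirty() copies them back later.
	 */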
	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}
	return qbh->data;

bail4:
	brelse(qbh->bh[3]);
bail3:
	brelse(qbh->bh[2]);
bail2:
	brelse(qbh->bh[1]);
bail1:
	brelse(qbh->bh[0]);
bail0:
	return NULL;
}

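/*
 * Release a quad buffer: free the bounce buffer if one was allocated and
 * drop the references on all four buffer heads.
 */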
void hpfs_brelse4(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data))
		kfree(qbh->data);
	brelse(qbh->bh[0]);
	brelse(qbh->bh[1]);
	brelse(qbh->bh[2]);
	brelse(qbh->bh[3]);
}

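/*
 * Mark all four buffers dirty. If the caller was working in a bounce
 * buffer, copy its contents back into the individual buffers first.
 */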
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
		memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
		memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
		memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
		memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
	}
	mark_buffer_dirty(qbh->bh[0]);
	mark_buffer_dirty(qbh->bh[1]);
	mark_buffer_dirty(qbh->bh[2]);
	mark_buffer_dirty(qbh->bh[3]);
}

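/*
 * Typical use of the quad-buffer API, as a sketch only (real callers add
 * error handling and hold the HPFS lock; struct dnode is the 2048-byte
 * on-disk structure from hpfs.h):
 *
 *	struct quad_buffer_head qbh;
 *	struct dnode *d;
 *
 *	d = hpfs_map_4sectors(s, dno, &qbh, 0);
 *	if (!d)
 *		return;
 *	// ... read or modify the 2048-byte structure at d ...
 *	hpfs_mark_4buffers_dirty(&qbh);	// only if it was modified
 *	hpfs_brelse4(&qbh);
 */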