linux/fs/hfs/btree.c
/*
 *  linux/fs/hfs/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/log2.h>

#include "btree.h"

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct page *page;
        unsigned int size;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;

        mutex_init(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        /* Set the correct compare function */
        tree->sb = sb;
        tree->cnid = id;
        tree->keycmp = keycmp;

        tree->inode = iget_locked(sb, id);
        if (!tree->inode)
                goto free_tree;
        BUG_ON(!(tree->inode->i_state & I_NEW));
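        /* Set up the tree's backing inode from the extents or catalog file
         * information recorded in the MDB */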
        {
        struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
        HFS_I(tree->inode)->flags = 0;
        mutex_init(&HFS_I(tree->inode)->extents_lock);
        switch (id) {
        case HFS_EXT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
                                    mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        case HFS_CAT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
                                    mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        default:
                BUG();
        }
        }
        unlock_new_inode(tree->inode);

        if (!HFS_I(tree->inode)->first_blocks) {
                printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
                goto free_inode;
        }

        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
                goto free_inode;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

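        /* Sanity-check the header: the node size must be a power of two, the
         * tree must contain at least one node, and the maximum key length
         * must match the tree type. */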
        size = tree->node_size;
        if (!is_power_of_2(size))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;
        switch (id) {
        case HFS_EXT_CNID:
                if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
                        printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                break;
        case HFS_CAT_CNID:
                if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
                        printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                break;
        default:
                BUG();
        }

        tree->node_size_shift = ffs(size) - 1;
        tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        kunmap(page);
        page_cache_release(page);
        return tree;

fail_page:
        page_cache_release(page);
free_inode:
        tree->inode->i_mapping->a_ops = &hfs_aops;
        iput(tree->inode);
free_tree:
        kfree(tree);
        return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                printk(KERN_ERR "hfs: node %d:%d still has %d user(s)!\n",
                                        node->tree->cnid, node->this, atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}

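/* Write the in-core header fields back into the header record of node 0 */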
void hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return;
        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));

        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
}

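/* Create a new map node at idx, chain it after prev and initialize its
 * descriptor and single bitmap record */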
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        if (!tree->free_nodes)
                panic("FIXME!!!");
        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
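        /* Bit 0 of the new map record covers this map node itself, so mark it
         * allocated; the offset table in the last four bytes records that
         * record 0 spans offsets 14 .. node_size - 6. */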
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}

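/* Allocate a node from the tree's bitmap, growing the tree file if necessary */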
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        unsigned off;
        u16 off16;
        u16 len;
        u8 *data, byte, m;
        int i;

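        /* No free nodes left: extend the tree file and account the newly
         * added nodes as free. */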
        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
                u32 count;
                int res;

                res = hfs_extend_file(inode);
                if (res)
                        return ERR_PTR(res);
                HFS_I(inode)->phys_size = inode->i_size =
                                (loff_t)HFS_I(inode)->alloc_blocks *
                                HFS_SB(tree->sb)->alloc_blksz;
                HFS_I(inode)->fs_blocks = inode->i_size >>
                                          tree->sb->s_blocksize_bits;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes = count - tree->node_count;
                tree->node_count = count;
        }

        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
        len = hfs_brec_lenoff(node, 2, &off16);
        off = off16;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_CACHE_MASK;
        idx = 0;

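        /* Scan the map records one byte at a time; a clear bit marks a free
         * node. idx is the node number of bit 0 of the current byte. */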
        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree, idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_CACHE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
                        printk(KERN_DEBUG "hfs: create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off16);
                off = off16;
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_CACHE_MASK;
        }
}

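/* Release a node back to the tree by clearing its bit in the allocation bitmap */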
void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                hfs_bnode_put(node);
                if (!i) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
                        return;
                }
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
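        /* nidx is now a bit offset within this map record; clear that bit
         * (bits are most-significant-first within each byte). */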
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_CACHE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_CACHE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}