linux/fs/hfs/btree.c
/*
 *  linux/fs/hfs/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/pagemap.h>
#include <linux/log2.h>

#include "btree.h"

/* Get a reference to a B*Tree and do some initial checks */
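/*
 * Typical callers (a sketch based on hfs_mdb_get() in fs/hfs/mdb.c, which
 * opens the two metadata trees at mount time; error handling omitted):
 *
 *	HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp);
 *	HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp);
 *
 * A NULL return means the tree could not be opened.
 */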
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct page *page;
        unsigned int size;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;

        init_MUTEX(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        /* Set the correct compare function */
        tree->sb = sb;
        tree->cnid = id;
        tree->keycmp = keycmp;

        tree->inode = iget_locked(sb, id);
        if (!tree->inode)
                goto free_tree;
        BUG_ON(!(tree->inode->i_state & I_NEW));
        {
        struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
        HFS_I(tree->inode)->flags = 0;
        init_MUTEX(&HFS_I(tree->inode)->extents_lock);
        switch (id) {
        case HFS_EXT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
                                    mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        case HFS_CAT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
                                    mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        default:
                BUG();
        }
        }
        unlock_new_inode(tree->inode);

        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
                goto free_inode;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

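        /* Sanity-check the on-disk header before trusting any of its fields */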
        size = tree->node_size;
        if (!is_power_of_2(size))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;
        if ((id == HFS_EXT_CNID) && (tree->max_key_len != HFS_MAX_EXT_KEYLEN)) {
                printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
                        tree->max_key_len);
                goto fail_page;
        }
        if ((id == HFS_CAT_CNID) && (tree->max_key_len != HFS_MAX_CAT_KEYLEN)) {
                printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
                        tree->max_key_len);
                goto fail_page;
        }

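        /* node_size is a power of two, so ffs(size) - 1 is log2(node_size) */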
        tree->node_size_shift = ffs(size) - 1;
        tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        kunmap(page);
        page_cache_release(page);
        return tree;

fail_page:
        kunmap(page);
        page_cache_release(page);
free_inode:
        tree->inode->i_mapping->a_ops = &hfs_aops;
        iput(tree->inode);
free_tree:
        kfree(tree);
        return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                printk(KERN_ERR "hfs: node %d:%d still has %d user(s)!\n",
                                        node->tree->cnid, node->this, atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}

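/*
 * Flush the in-core header fields back into the header record of node 0.
 * The node page is only marked dirty here; actual writeback happens later.
 */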
void hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return;
        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));

        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
}

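/*
 * Chain a new map node after @prev once all existing map records are full.
 * The new node gets a single map record covering offsets 14..node_size-6;
 * its first bit is set so the map node itself is marked as allocated.
 */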
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        if (!tree->free_nodes)
                panic("FIXME!!!");
        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}

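/*
 * Allocate a node from the tree's bitmap.  If the bitmap shows no free
 * nodes, the underlying tree file is grown first; the scan then walks the
 * map record of the header node and any chained map nodes until it finds
 * a clear bit, sets it, and creates the corresponding bnode.
 */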
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        u16 off, len;
        u8 *data, byte, m;
        int i;

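        /*
         * No free nodes left in the map: grow the tree file and account the
         * newly added nodes as free.
         */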
        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
                u32 count;
                int res;

                res = hfs_extend_file(inode);
                if (res)
                        return ERR_PTR(res);
                HFS_I(inode)->phys_size = inode->i_size =
                                (loff_t)HFS_I(inode)->alloc_blocks *
                                HFS_SB(tree->sb)->alloc_blksz;
                HFS_I(inode)->fs_blocks = inode->i_size >>
                                          tree->sb->s_blocksize_bits;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes = count - tree->node_count;
                tree->node_count = count;
        }

        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
        len = hfs_brec_lenoff(node, 2, &off);

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_CACHE_MASK;
        idx = 0;

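        /*
         * Scan the map record byte by byte; within each byte the MSB is the
         * lowest-numbered node.  When the record is exhausted, follow the
         * chain to the next map node, creating one if necessary.
         */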
        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree, idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_CACHE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
                        printk(KERN_DEBUG "hfs: create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off);
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_CACHE_MASK;
        }
}

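/*
 * Mark a node free in the tree's bitmap.  The map record covering the node
 * is located by walking the bmap chain from the header node; the node's bit
 * is then cleared and the tree's free count is bumped.
 */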
void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
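        /* Skip whole map records until we reach the one covering nidx */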
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                if (!i) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n",
                               node->this);
                        hfs_bnode_put(node);
                        return;
                }
                hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_CACHE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_CACHE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}