/*
 *  linux/fs/hfs/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/log2.h>

#include "btree.h"

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct page *page;
        unsigned int size;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;

        mutex_init(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        /* Set the correct compare function */
        tree->sb = sb;
        tree->cnid = id;
        tree->keycmp = keycmp;

        tree->inode = iget_locked(sb, id);
        if (!tree->inode)
                goto free_tree;
        BUG_ON(!(tree->inode->i_state & I_NEW));
        {
        struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
        HFS_I(tree->inode)->flags = 0;
        mutex_init(&HFS_I(tree->inode)->extents_lock);
        switch (id) {
        case HFS_EXT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
                                    mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
                if (HFS_I(tree->inode)->alloc_blocks >
                                        HFS_I(tree->inode)->first_blocks) {
                        pr_err("invalid btree extent records\n");
                        unlock_new_inode(tree->inode);
                        goto free_inode;
                }

                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        case HFS_CAT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
                                    mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));

                if (!HFS_I(tree->inode)->first_blocks) {
                        pr_err("invalid btree extent records (0 size)\n");
                        unlock_new_inode(tree->inode);
                        goto free_inode;
                }

                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        default:
                BUG();
        }
        }
        unlock_new_inode(tree->inode);

        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
                goto free_inode;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

        size = tree->node_size;
        if (!is_power_of_2(size))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;
        switch (id) {
        case HFS_EXT_CNID:
                if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
                        pr_err("invalid extent max_key_len %d\n",
                               tree->max_key_len);
                        goto fail_page;
                }
                break;
        case HFS_CAT_CNID:
                if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
                        pr_err("invalid catalog max_key_len %d\n",
                               tree->max_key_len);
                        goto fail_page;
                }
                break;
        default:
                BUG();
        }

        tree->node_size_shift = ffs(size) - 1;
        tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        kunmap(page);
        page_cache_release(page);
        return tree;

fail_page:
        page_cache_release(page);
free_inode:
        tree->inode->i_mapping->a_ops = &hfs_aops;
        iput(tree->inode);
free_tree:
        kfree(tree);
        return NULL;
}

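/*
 * Usage sketch (not part of this file): the extent and catalog trees are
 * opened once at mount time, roughly as fs/hfs/mdb.c does it. The field
 * and key-compare names below follow that file but are shown here only as
 * an illustration.
 *
 *	HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp);
 *	if (!HFS_SB(sb)->ext_tree) {
 *		pr_err("unable to open extent tree\n");
 *		goto out;
 *	}
 *	HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp);
 *	if (!HFS_SB(sb)->cat_tree) {
 *		pr_err("unable to open catalog tree\n");
 *		goto out;
 *	}
 */
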
/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                pr_err("node %d:%d still has %d user(s)!\n",
                                       node->tree->cnid, node->this,
                                       atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}

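/*
 * Usage sketch (illustrative only): the counterpart of the open above is
 * called at unmount time, e.g.
 *
 *	hfs_btree_close(HFS_SB(sb)->cat_tree);
 *	hfs_btree_close(HFS_SB(sb)->ext_tree);
 *
 * Any node still holding a reference at that point is reported by the
 * pr_err() in hfs_btree_close() and freed anyway.
 */
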
void hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return;
        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));

        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
}

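/*
 * Layout note (sketch): the header record that hfs_btree_open() and
 * hfs_btree_write() access lives in node 0, directly behind the node
 * descriptor, so both address it as
 *
 *	head = (struct hfs_btree_header_rec *)
 *			(kmap(page) + sizeof(struct hfs_bnode_desc));
 *
 * i.e. sizeof(struct hfs_bnode_desc) == 14 bytes into the first page of
 * the tree inode's mapping. Only the fields that change at runtime (root,
 * leaf/node counts, attributes, depth) are written back; node_size and
 * max_key_len stay as created.
 */
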
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        if (!tree->free_nodes)
                panic("FIXME!!!");
        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}

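/*
 * Layout sketch for the map node built above (derived from the offsets in
 * hfs_bmap_new_bmap(), shown only as an illustration):
 *
 *	offset 0              : 14-byte node descriptor (type HFS_NODE_MAP)
 *	offset 14             : record 0, the allocation bitmap; 0x8000 sets
 *	                        its first bit, i.e. the map node itself is
 *	                        marked allocated
 *	offset node_size - 6  : end of record 0 / start of free space
 *	offset node_size - 4  : 16-bit offset of the free space (node_size - 6)
 *	offset node_size - 2  : 16-bit offset of record 0 (14)
 *
 * The bitmap therefore spans node_size - 20 bytes and can track
 * (node_size - 20) * 8 nodes.
 */
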
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        unsigned off;
        u16 off16;
        u16 len;
        u8 *data, byte, m;
        int i;

        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
                u32 count;
                int res;

                res = hfs_extend_file(inode);
                if (res)
                        return ERR_PTR(res);
                HFS_I(inode)->phys_size = inode->i_size =
                                (loff_t)HFS_I(inode)->alloc_blocks *
                                HFS_SB(tree->sb)->alloc_blksz;
                HFS_I(inode)->fs_blocks = inode->i_size >>
                                          tree->sb->s_blocksize_bits;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes = count - tree->node_count;
                tree->node_count = count;
        }

        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
        len = hfs_brec_lenoff(node, 2, &off16);
        off = off16;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_CACHE_MASK;
        idx = 0;

        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree, idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_CACHE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
                        printk(KERN_DEBUG "create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off16);
                off = off16;
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_CACHE_MASK;
        }
}

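/*
 * Worked example for the scan above (illustrative only): with
 * byte == 0xf0 (1111 0000), the inner loop walks m = 0x80, 0x40, 0x20,
 * 0x10 without finding a clear bit, then hits m == 0x08 at i == 4.
 * idx is advanced by 4, that bit is set (byte becomes 0xf8), and
 * hfs_bnode_create(tree, idx) returns the freshly reserved node.
 * Bits are thus numbered from the most significant end of each byte,
 * matching the bitmap convention used by hfs_bmap_free() below.
 */
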
void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                hfs_bnode_put(node);
                if (!i) {
                        /* panic */;
                        pr_crit("unable to free bnode %u. bmap not found!\n",
                                node->this);
                        return;
                }
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        pr_crit("invalid bmap found! (%u,%d)\n",
                                node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_CACHE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_CACHE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                pr_crit("trying to free free bnode %u(%d)\n",
                        node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}

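/*
 * Mask arithmetic note (illustrative only): after the while loop above,
 * nidx is the bit number of the node inside the current map record. The
 * byte holding it sits nidx / 8 bytes into the record, and because bits
 * are counted from the most significant end, the mask is
 *
 *	m = 1 << (~nidx & 7);	 // same as 1 << (7 - (nidx & 7))
 *
 * e.g. nidx & 7 == 2 gives m == 0x20, the third bit from the top of the
 * byte. Clearing that bit returns the node to the bitmap; the updated
 * free_nodes count reaches disk via the next hfs_btree_write().
 */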