linux/fs/udf/balloc.c
/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_next_one_bit(addr, size, offset) \
                ext2_find_next_bit(addr, size, offset)

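/*
 * Read the bitmap block at index 'block' within the space bitmap extent and
 * cache its buffer_head in bitmap->s_block_bitmap[bitmap_nr].  Returns 0 on
 * success or -EIO if the block could not be read (the slot is then left
 * holding NULL).
 */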
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int retval = 0;
        struct kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
        if (!bh)
                retval = -EIO;

        bitmap->s_block_bitmap[bitmap_nr] = bh;
        return retval;
}

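/*
 * Make sure the bitmap block for 'block_group' is cached, reading it in on
 * first use.  Returns the slot index (equal to block_group) on success or a
 * negative error from read_block_bitmap().
 */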
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
                          nr_groups);
        }

        if (bitmap->s_block_bitmap[block_group]) {
                return block_group;
        } else {
                retval = read_block_bitmap(sb, bitmap, block_group,
                                           block_group);
                if (retval < 0)
                        return retval;
                return block_group;
        }
}

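/*
 * Wrapper around __load_block_bitmap() that also treats a NULL cached
 * buffer_head as -EIO, so callers get either a usable slot or an error.
 */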
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}

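/*
 * Adjust the free space count for 'partition' in the cached Logical Volume
 * Integrity Descriptor by 'cnt' (callers pass a negative value to subtract)
 * and mark the LVID as updated.  A missing LVID is silently ignored.
 */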
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;

        if (!sbi->s_lvid_bh)
                return;

        lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
        udf_updated_lvid(sb);
}

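/*
 * Free 'count' blocks starting at 'bloc' + 'offset' in a bitmap-managed
 * partition: set the corresponding bits (warning if a bit was already set),
 * handle runs that cross a bitmap block boundary, and credit the freed
 * blocks back to the LVID free space count.
 */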
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct inode *inode,
                                   struct udf_bitmap *bitmap,
                                   struct kernel_lb_addr *bloc,
                                   uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        struct udf_part_map *partmap;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
                          count, partmap->s_partition_len);
                goto error_return;
        }

        block = bloc->logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

        do {
                overflow = 0;
                block_group = block >> (sb->s_blocksize_bits + 3);
                bit = block % (sb->s_blocksize << 3);

                /*
                 * Check to see if we are freeing blocks across a group boundary.
                 */
                if (bit + count > (sb->s_blocksize << 3)) {
                        overflow = bit + count - (sb->s_blocksize << 3);
                        count -= overflow;
                }
                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;

                bh = bitmap->s_block_bitmap[bitmap_nr];
                for (i = 0; i < count; i++) {
                        if (udf_set_bit(bit + i, bh->b_data)) {
                                udf_debug("bit %ld already set\n", bit + i);
                                udf_debug("byte=%2x\n",
                                          ((char *)bh->b_data)[(bit + i) >> 3]);
                        }
                }
                udf_add_free_space(sb, sbi->s_partition, count);
                mark_buffer_dirty(bh);
                if (overflow) {
                        block += count;
                        count = overflow;
                }
        } while (overflow);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
}

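/*
 * Try to pre-allocate up to 'block_count' consecutive blocks starting at
 * 'first_block' by clearing bits in the space bitmap.  The loop stops at the
 * first block that is already in use and returns the number of blocks
 * actually allocated.
 */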
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct inode *inode,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group, group_start;
        int nr_groups, bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        do {
                nr_groups = udf_compute_nr_groups(sb, partition);
                block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
                block_group = block >> (sb->s_blocksize_bits + 3);
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto out;
                bh = bitmap->s_block_bitmap[bitmap_nr];

                bit = block % (sb->s_blocksize << 3);

                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_clear_bit(bit, bh->b_data))
                                goto out;
                        block_count--;
                        alloc_count++;
                        bit++;
                        block++;
                }
                mark_buffer_dirty(bh);
        } while (block_count > 0);

out:
        udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

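/*
 * Allocate a single block from a bitmap-managed partition, trying the 'goal'
 * block first, then the rest of its bitmap block, and finally scanning the
 * remaining block groups for any free bit.  A hit is walked back up to seven
 * bits so allocations tend to start at the beginning of a free run.  Returns
 * the new logical block number with *err set to 0, or 0 with *err set on
 * failure.
 */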
static int udf_bitmap_new_block(struct super_block *sb,
                                struct inode *inode,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0, block, block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        int newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit((char *)bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        i = 0;
        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {
                ++i;
                --bit;
        }

got_block:
        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        udf_add_free_space(sb, partition, -1);
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}

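/*
 * Free 'count' blocks in a partition whose free space is tracked by an
 * unallocated space table (an inode holding free extents).  The freed range
 * is merged into an adjacent extent when possible; otherwise a new extent is
 * appended, stealing one block from the range itself if a new allocation
 * extent descriptor block is needed (see the NOTE below on why udf_add_aext
 * cannot be used here).
 */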
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *inode,
                                  struct inode *table,
                                  struct kernel_lb_addr *bloc,
                                  uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *partmap;
        uint32_t start, end;
        uint32_t elen;
        struct kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        int i;
        struct udf_inode_info *iinfo;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        iinfo = UDF_I(table);
        udf_add_free_space(sb, sbi->s_partition, count);

        start = bloc->logicalBlockNum + offset;
        end = bloc->logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = iinfo->i_location;
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum +
                        (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                        sb->s_blocksize_bits);
                                count -= tmp;
                                start += tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                end -= tmp;
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        i = -1;
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we hold the super block
                 * lock already very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;
                struct short_ad *sad = NULL;
                struct long_ad *lad = NULL;
                struct allocExtDesc *aed;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(struct short_ad);
                else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(struct long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        unsigned char *sptr, *dptr;
                        int loffset;

                        brelse(oepos.bh);
                        oepos = epos;

                        /* Steal a block from the extent being free'd */
                        epos.block.logicalBlockNum = eloc.logicalBlockNum;
                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;

                        epos.bh = udf_tread(sb,
                                        udf_get_lb_pblock(sb, &epos.block, 0));
                        if (!epos.bh) {
                                brelse(oepos.bh);
                                goto error_return;
                        }
                        aed = (struct allocExtDesc *)(epos.bh->b_data);
                        aed->previousAllocExtLocation =
                                cpu_to_le32(oepos.block.logicalBlockNum);
                        if (epos.offset + adsize > sb->s_blocksize) {
                                loffset = epos.offset;
                                aed->lengthAllocDescs = cpu_to_le32(adsize);
                                sptr = iinfo->i_ext.i_data + epos.offset
                                                                - adsize;
                                dptr = epos.bh->b_data +
                                        sizeof(struct allocExtDesc);
                                memcpy(dptr, sptr, adsize);
                                epos.offset = sizeof(struct allocExtDesc) +
                                                adsize;
                        } else {
                                loffset = epos.offset + adsize;
                                aed->lengthAllocDescs = cpu_to_le32(0);
                                if (oepos.bh) {
                                        sptr = oepos.bh->b_data + epos.offset;
                                        aed = (struct allocExtDesc *)
                                                oepos.bh->b_data;
                                        le32_add_cpu(&aed->lengthAllocDescs,
                                                        adsize);
                                } else {
                                        sptr = iinfo->i_ext.i_data +
                                                                epos.offset;
                                        iinfo->i_lenAlloc += adsize;
                                        mark_inode_dirty(table);
                                }
                                epos.offset = sizeof(struct allocExtDesc);
                        }
                        if (sbi->s_udfrev >= 0x0200)
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            3, 1, epos.block.logicalBlockNum,
                                            sizeof(struct tag));
                        else
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            2, 1, epos.block.logicalBlockNum,
                                            sizeof(struct tag));

                        switch (iinfo->i_alloc_type) {
                        case ICBTAG_FLAG_AD_SHORT:
                                sad = (struct short_ad *)sptr;
                                sad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                sad->extPosition =
                                        cpu_to_le32(epos.block.logicalBlockNum);
                                break;
                        case ICBTAG_FLAG_AD_LONG:
                                lad = (struct long_ad *)sptr;
                                lad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                lad->extLocation =
                                        cpu_to_lelb(epos.block);
                                break;
                        }
                        if (oepos.bh) {
                                udf_update_tag(oepos.bh->b_data, loffset);
                                mark_buffer_dirty(oepos.bh);
                        } else {
                                mark_inode_dirty(table);
                        }
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen) {
                        udf_write_aext(table, &epos, &eloc, elen, 1);

                        if (!epos.bh) {
                                iinfo->i_lenAlloc += adsize;
                                mark_inode_dirty(table);
                        } else {
                                aed = (struct allocExtDesc *)epos.bh->b_data;
                                le32_add_cpu(&aed->lengthAllocDescs, adsize);
                                udf_update_tag(epos.bh->b_data, epos.offset);
                                mark_buffer_dirty(epos.bh);
                        }
                }
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

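/*
 * Pre-allocate up to 'block_count' blocks starting exactly at 'first_block'
 * from a table-managed partition.  This only succeeds if an extent in the
 * free space table begins at 'first_block'; that extent is then shortened
 * (or deleted when fully consumed) and the number of blocks taken is
 * returned.
 */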
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *inode,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        struct kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;
        struct udf_inode_info *iinfo;

        if (first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        iinfo = UDF_I(table);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%d, elen=%d, first_block=%d\n",
                          eloc.logicalBlockNum, elen, first_block);
                ; /* empty loop body */
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos, eloc,
                                        (etype << 30) | elen);
        } else {
                alloc_count = 0;
        }

        brelse(epos.bh);

        if (alloc_count)
                udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

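/*
 * Allocate one block from a table-managed partition.  The free space table
 * is scanned for the extent closest to 'goal'; the first block of that
 * extent is handed out and the extent is shortened (or deleted if it becomes
 * empty).  Returns the new block number, or 0 with *err set.
 */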
static int udf_table_new_block(struct super_block *sb,
                               struct inode *inode,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        uint32_t newblock = 0, adsize;
        uint32_t elen, goal_elen = 0;
        struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
        struct extent_position epos, goal_epos;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(table);

        *err = -ENOSPC;

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /* We search for the closest matching block to goal. If we find
           an exact hit, we stop. Otherwise we keep going till we run out
           of extents. We store the buffer_head, bloc, and extoffset
           of the current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                        (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
        brelse(goal_epos.bh);

        udf_add_free_space(sb, partition, -1);

        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}

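/*
 * Top-level block freeing entry point: dispatch to the bitmap- or
 * table-based implementation depending on how free space is tracked for the
 * partition referenced by 'bloc'.
 */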
void udf_free_blocks(struct super_block *sb, struct inode *inode,
                     struct kernel_lb_addr *bloc, uint32_t offset,
                     uint32_t count)
{
        uint16_t partition = bloc->partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
                                      bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
                                      bloc, offset, count);
        }
}

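/*
 * Pre-allocate 'block_count' blocks starting at 'first_block' in
 * 'partition', dispatching on the partition's space management scheme.
 * Returns the number of blocks actually reserved (possibly 0).
 */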
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_uspace.s_bitmap,
                                                  partition, first_block,
                                                  block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_uspace.s_table,
                                                 partition, first_block,
                                                 block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_fspace.s_bitmap,
                                                  partition, first_block,
                                                  block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_fspace.s_table,
                                                 partition, first_block,
                                                 block_count);
        else
                return 0;
}

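/*
 * Allocate a single block in 'partition' near 'goal', dispatching on the
 * partition's space management scheme.  Returns the new block number, or 0
 * with *err set when no scheme is available or allocation fails.
 */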
inline int udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_uspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_uspace.s_table,
                                           partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_fspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_fspace.s_table,
                                           partition, goal, err);
        else {
                *err = -EIO;
                return 0;
        }
}