linux/fs/udf/balloc.c
/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit   __test_and_clear_bit_le
#define udf_set_bit     __test_and_set_bit_le
#define udf_test_bit    test_bit_le
#define udf_find_next_one_bit   find_next_bit_le

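/*
 * Read block @block of the partition's space bitmap into cache slot
 * @bitmap_nr.  On read failure the slot is left NULL and -EIO is
 * returned; load_block_bitmap() detects the NULL slot.
 */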
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int retval = 0;
        struct kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
        if (!bh)
                retval = -EIO;

        bitmap->s_block_bitmap[bitmap_nr] = bh;
        return retval;
}

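/*
 * Make sure the bitmap block for @block_group is present in the cache.
 * Returns the cache slot (equal to the group number) or a negative errno.
 */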
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
                          nr_groups);
                /* Refuse to index past the end of the bitmap cache */
                return -EIO;
        }

        if (bitmap->s_block_bitmap[block_group])
                return block_group;

        retval = read_block_bitmap(sb, bitmap, block_group, block_group);
        if (retval < 0)
                return retval;

        return block_group;
}

static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}

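/*
 * Adjust the free space counter of @partition in the Logical Volume
 * Integrity Descriptor by @cnt blocks (callers pass a negative count
 * when allocating) and mark the LVID as updated.
 */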
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;

        if (!sbi->s_lvid_bh)
                return;

        lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
        udf_updated_lvid(sb);
}

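/*
 * Free @count blocks starting at @bloc + @offset by setting the
 * corresponding bits in the space bitmap, splitting the run at bitmap
 * group boundaries and updating the LVID free space counter.
 */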
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct inode *inode,
                                   struct udf_bitmap *bitmap,
                                   struct kernel_lb_addr *bloc,
                                   uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        struct udf_part_map *partmap;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
                          count, partmap->s_partition_len);
                goto error_return;
        }

        block = bloc->logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

        do {
                overflow = 0;
                block_group = block >> (sb->s_blocksize_bits + 3);
                bit = block % (sb->s_blocksize << 3);

                /*
                 * Check to see if we are freeing blocks across a group
                 * boundary.
                 */
                if (bit + count > (sb->s_blocksize << 3)) {
                        overflow = bit + count - (sb->s_blocksize << 3);
                        count -= overflow;
                }
                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;

                bh = bitmap->s_block_bitmap[bitmap_nr];
                for (i = 0; i < count; i++) {
                        if (udf_set_bit(bit + i, bh->b_data)) {
                                udf_debug("bit %ld already set\n", bit + i);
                                udf_debug("byte=%2x\n",
                                        ((char *)bh->b_data)[(bit + i) >> 3]);
                        }
                }
                udf_add_free_space(sb, sbi->s_partition, count);
                mark_buffer_dirty(bh);
                if (overflow) {
                        block += count;
                        count = overflow;
                }
        } while (overflow);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
}

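/*
 * Try to preallocate up to @block_count contiguous blocks starting at
 * @first_block by clearing bits in the space bitmap.  The scan stops at
 * the first block that is already in use; the number of blocks actually
 * grabbed is returned.
 */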
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct inode *inode,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group;
        int bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        do {
                block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
                block_group = block >> (sb->s_blocksize_bits + 3);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto out;
                bh = bitmap->s_block_bitmap[bitmap_nr];

                bit = block % (sb->s_blocksize << 3);

                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_clear_bit(bit, bh->b_data))
                                goto out;
                        block_count--;
                        alloc_count++;
                        bit++;
                        block++;
                }
                mark_buffer_dirty(bh);
        } while (block_count > 0);

out:
        udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

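/*
 * Allocate a single block near @goal.  The goal bit itself is tried
 * first, then the rest of its 64-bit word, then the remainder of the
 * goal's bitmap group, and finally all other groups.  Once a free bit
 * is found we search back up to seven bits so the allocation starts at
 * the beginning of a free run.
 */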
static int udf_bitmap_new_block(struct super_block *sb,
                                struct inode *inode,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0, block, block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        int newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit(bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        i = 0;
        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {
                ++i;
                --bit;
        }

got_block:
        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        udf_add_free_space(sb, partition, -1);
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}

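/*
 * Free @count blocks by merging them into the unallocated space table:
 * extend an existing extent that ends just before or starts just after
 * the freed run, or, if no neighbour is found, append a new extent.
 * When the current allocation extent descriptor is full, one block of
 * the freed run is stolen to hold a new AED instead of allocating one.
 */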
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *inode,
                                  struct inode *table,
                                  struct kernel_lb_addr *bloc,
                                  uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *partmap;
        uint32_t start, end;
        uint32_t elen;
        struct kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        struct udf_inode_info *iinfo;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        iinfo = UDF_I(table);
        udf_add_free_space(sb, sbi->s_partition, count);

        start = bloc->logicalBlockNum + offset;
        end = bloc->logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = iinfo->i_location;
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if ((eloc.logicalBlockNum +
                        (elen >> sb->s_blocksize_bits)) == start) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                        sb->s_blocksize_bits);
                                count -= tmp;
                                start += tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                end -= tmp;
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we hold the super block
                 * lock already very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;
                struct short_ad *sad = NULL;
                struct long_ad *lad = NULL;
                struct allocExtDesc *aed;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(struct short_ad);
                else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(struct long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        unsigned char *sptr, *dptr;
                        int loffset;

                        brelse(oepos.bh);
                        oepos = epos;

                        /* Steal a block from the extent being freed */
                        epos.block.logicalBlockNum = eloc.logicalBlockNum;
                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;

                        epos.bh = udf_tread(sb,
                                        udf_get_lb_pblock(sb, &epos.block, 0));
                        if (!epos.bh) {
                                brelse(oepos.bh);
                                goto error_return;
                        }
                        aed = (struct allocExtDesc *)(epos.bh->b_data);
                        aed->previousAllocExtLocation =
                                cpu_to_le32(oepos.block.logicalBlockNum);
                        if (epos.offset + adsize > sb->s_blocksize) {
                                loffset = epos.offset;
                                aed->lengthAllocDescs = cpu_to_le32(adsize);
                                sptr = iinfo->i_ext.i_data + epos.offset
                                                                - adsize;
                                dptr = epos.bh->b_data +
                                        sizeof(struct allocExtDesc);
                                memcpy(dptr, sptr, adsize);
                                epos.offset = sizeof(struct allocExtDesc) +
                                                adsize;
                        } else {
                                loffset = epos.offset + adsize;
                                aed->lengthAllocDescs = cpu_to_le32(0);
                                if (oepos.bh) {
                                        sptr = oepos.bh->b_data + epos.offset;
                                        aed = (struct allocExtDesc *)
                                                oepos.bh->b_data;
                                        le32_add_cpu(&aed->lengthAllocDescs,
                                                        adsize);
                                } else {
                                        sptr = iinfo->i_ext.i_data +
                                                                epos.offset;
                                        iinfo->i_lenAlloc += adsize;
                                        mark_inode_dirty(table);
                                }
                                epos.offset = sizeof(struct allocExtDesc);
                        }
                        if (sbi->s_udfrev >= 0x0200)
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            3, 1, epos.block.logicalBlockNum,
                                            sizeof(struct tag));
                        else
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            2, 1, epos.block.logicalBlockNum,
                                            sizeof(struct tag));

                        switch (iinfo->i_alloc_type) {
                        case ICBTAG_FLAG_AD_SHORT:
                                sad = (struct short_ad *)sptr;
                                sad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                sad->extPosition =
                                        cpu_to_le32(epos.block.logicalBlockNum);
                                break;
                        case ICBTAG_FLAG_AD_LONG:
                                lad = (struct long_ad *)sptr;
                                lad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                lad->extLocation =
                                        cpu_to_lelb(epos.block);
                                break;
                        }
                        if (oepos.bh) {
                                udf_update_tag(oepos.bh->b_data, loffset);
                                mark_buffer_dirty(oepos.bh);
                        } else {
                                mark_inode_dirty(table);
                        }
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen) {
                        udf_write_aext(table, &epos, &eloc, elen, 1);

                        if (!epos.bh) {
                                iinfo->i_lenAlloc += adsize;
                                mark_inode_dirty(table);
                        } else {
                                aed = (struct allocExtDesc *)epos.bh->b_data;
                                le32_add_cpu(&aed->lengthAllocDescs, adsize);
                                udf_update_tag(epos.bh->b_data, epos.offset);
                                mark_buffer_dirty(epos.bh);
                        }
                }
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

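/*
 * Preallocate up to @block_count blocks from the extent of the free
 * space table that starts exactly at @first_block.  The extent is
 * shortened (or deleted when fully consumed) and the number of blocks
 * taken is returned; 0 if no extent starts at @first_block.
 */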
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *inode,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        struct kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;
        struct udf_inode_info *iinfo;

        if (first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        iinfo = UDF_I(table);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%d, elen=%d, first_block=%d\n",
                          eloc.logicalBlockNum, elen, first_block);
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos, eloc,
                                        (etype << 30) | elen);
        } else {
                alloc_count = 0;
        }

        brelse(epos.bh);

        if (alloc_count)
                udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

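/*
 * Allocate one block from the free space table, choosing the extent
 * containing @goal or, failing that, the extent closest to it.  Blocks
 * are only taken from the beginning of an extent, so the extent can
 * simply be shrunk or deleted.
 */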
static int udf_table_new_block(struct super_block *sb,
                               struct inode *inode,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        uint32_t newblock = 0, adsize;
        uint32_t elen, goal_elen = 0;
        struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
        struct extent_position epos, goal_epos;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(table);

        *err = -ENOSPC;

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /* We search for the closest matching block to goal. If we find
           an exact hit, we stop. Otherwise we keep going until we run out
           of extents. We store the buffer_head, bloc, and extoffset
           of the current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                        (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
        brelse(goal_epos.bh);

        udf_add_free_space(sb, partition, -1);

        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}

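/*
 * The entry points below dispatch to the bitmap or table implementation
 * according to the unallocated/freed space flags of the partition map.
 */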
void udf_free_blocks(struct super_block *sb, struct inode *inode,
                     struct kernel_lb_addr *bloc, uint32_t offset,
                     uint32_t count)
{
        uint16_t partition = bloc->partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
                                      bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
                                      bloc, offset, count);
        }
}

inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_uspace.s_bitmap,
                                                  partition, first_block,
                                                  block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_uspace.s_table,
                                                 partition, first_block,
                                                 block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_fspace.s_bitmap,
                                                  partition, first_block,
                                                  block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_fspace.s_table,
                                                 partition, first_block,
                                                 block_count);
        else
                return 0;
}

inline int udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_uspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_uspace.s_table,
                                           partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_fspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_fspace.s_table,
                                           partition, goal, err);
        else {
                *err = -EIO;
                return 0;
        }
}