linux/fs/udf/super.c
/*
 * super.c
 *
 * PURPOSE
 *  Super block routines for the OSTA-UDF(tm) filesystem.
 *
 * DESCRIPTION
 *  OSTA-UDF(tm) = Optical Storage Technology Association
 *  Universal Disk Format.
 *
 *  This code is based on version 2.00 of the UDF specification,
 *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
 *    http://www.osta.org/
 *    https://www.ecma.ch/
 *    https://www.iso.org/
 *
 * COPYRIGHT
 *  This file is distributed under the terms of the GNU General Public
 *  License (GPL). Copies of the GPL can be obtained from:
 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
 *  Each contributing author retains all rights to their own work.
 *
 *  (C) 1998 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  09/24/98 dgb  changed to allow compiling outside of kernel, and
 *                added some debugging.
 *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
 *  10/16/98      attempting some multi-session support
 *  10/17/98      added freespace count for "df"
 *  11/11/98 gr   added novrs option
 *  11/26/98 dgb  added fileset,anchor mount options
 *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
 *                vol descs. rewrote option handling based on isofs
 *  12/20/98      find the free space bitmap (if it exists)
 */

#include "udfdecl.h"

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/stat.h>
#include <linux/cdrom.h>
#include <linux/nls.h>
#include <linux/vfs.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/crc-itu-t.h>
#include <linux/log2.h>
#include <asm/byteorder.h>

#include "udf_sb.h"
#include "udf_i.h"

#include <linux/init.h>
#include <linux/uaccess.h>

enum {
        VDS_POS_PRIMARY_VOL_DESC,
        VDS_POS_UNALLOC_SPACE_DESC,
        VDS_POS_LOGICAL_VOL_DESC,
        VDS_POS_IMP_USE_VOL_DESC,
        VDS_POS_LENGTH
};

#define VSD_FIRST_SECTOR_OFFSET         32768
#define VSD_MAX_SECTOR_OFFSET           0x800000

/*
 * Maximum number of Terminating Descriptor / Logical Volume Integrity
 * Descriptor redirections. The chosen numbers are arbitrary - just that we
 * hopefully don't limit any real use of rewritten inode on write-once media
 * but avoid looping for too long on corrupted media.
 */
#define UDF_MAX_TD_NESTING 64
#define UDF_MAX_LVID_NESTING 1000

enum { UDF_MAX_LINKS = 0xffff };

/* These are the "meat" - everything else is stuffing */
static int udf_fill_super(struct super_block *, void *, int);
static void udf_put_super(struct super_block *);
static int udf_sync_fs(struct super_block *, int);
static int udf_remount_fs(struct super_block *, int *, char *);
static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
static int udf_statfs(struct dentry *, struct kstatfs *);
static int udf_show_options(struct seq_file *, struct dentry *);

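/*
 * Return the Implementation Use area of the cached Logical Volume Integrity
 * Descriptor, or NULL when no LVID buffer is loaded or numOfPartitions is too
 * large for the freeSpaceTable and sizeTable arrays to fit in one block.
 */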
struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
{
        struct logicalVolIntegrityDesc *lvid;
        unsigned int partnum;
        unsigned int offset;

        if (!UDF_SB(sb)->s_lvid_bh)
                return NULL;
        lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
        partnum = le32_to_cpu(lvid->numOfPartitions);
        if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
             offsetof(struct logicalVolIntegrityDesc, impUse)) /
             (2 * sizeof(uint32_t)) < partnum) {
                udf_err(sb, "Logical volume integrity descriptor corrupted "
                        "(numOfPartitions = %u)!\n", partnum);
                return NULL;
        }
        /* The offset is to skip freeSpaceTable and sizeTable arrays */
        offset = partnum * 2 * sizeof(uint32_t);
        return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
}

/* UDF filesystem type */
static struct dentry *udf_mount(struct file_system_type *fs_type,
                      int flags, const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
}

static struct file_system_type udf_fstype = {
        .owner          = THIS_MODULE,
        .name           = "udf",
        .mount          = udf_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("udf");

static struct kmem_cache *udf_inode_cachep;

static struct inode *udf_alloc_inode(struct super_block *sb)
{
        struct udf_inode_info *ei;
        ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;

        ei->i_unique = 0;
        ei->i_lenExtents = 0;
        ei->i_lenStreams = 0;
        ei->i_next_alloc_block = 0;
        ei->i_next_alloc_goal = 0;
        ei->i_strat4096 = 0;
        ei->i_streamdir = 0;
        init_rwsem(&ei->i_data_sem);
        ei->cached_extent.lstart = -1;
        spin_lock_init(&ei->i_extent_cache_lock);

        return &ei->vfs_inode;
}

static void udf_free_in_core_inode(struct inode *inode)
{
        kmem_cache_free(udf_inode_cachep, UDF_I(inode));
}

static void init_once(void *foo)
{
        struct udf_inode_info *ei = (struct udf_inode_info *)foo;

        ei->i_data = NULL;
        inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
        udf_inode_cachep = kmem_cache_create("udf_inode_cache",
                                             sizeof(struct udf_inode_info),
                                             0, (SLAB_RECLAIM_ACCOUNT |
                                                 SLAB_MEM_SPREAD |
                                                 SLAB_ACCOUNT),
                                             init_once);
        if (!udf_inode_cachep)
                return -ENOMEM;
        return 0;
}

static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(udf_inode_cachep);
}

/* Superblock operations */
static const struct super_operations udf_sb_ops = {
        .alloc_inode    = udf_alloc_inode,
        .free_inode     = udf_free_in_core_inode,
        .write_inode    = udf_write_inode,
        .evict_inode    = udf_evict_inode,
        .put_super      = udf_put_super,
        .sync_fs        = udf_sync_fs,
        .statfs         = udf_statfs,
        .remount_fs     = udf_remount_fs,
        .show_options   = udf_show_options,
};

struct udf_options {
        unsigned char novrs;
        unsigned int blocksize;
        unsigned int session;
        unsigned int lastblock;
        unsigned int anchor;
        unsigned int flags;
        umode_t umask;
        kgid_t gid;
        kuid_t uid;
        umode_t fmode;
        umode_t dmode;
        struct nls_table *nls_map;
};

static int __init init_udf_fs(void)
{
        int err;

        err = init_inodecache();
        if (err)
                goto out1;
        err = register_filesystem(&udf_fstype);
        if (err)
                goto out;

        return 0;

out:
        destroy_inodecache();

out1:
        return err;
}

static void __exit exit_udf_fs(void)
{
        unregister_filesystem(&udf_fstype);
        destroy_inodecache();
}

static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);

        sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL);
        if (!sbi->s_partmaps) {
                sbi->s_partitions = 0;
                return -ENOMEM;
        }

        sbi->s_partitions = count;
        return 0;
}

static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
{
        int i;
        int nr_groups = bitmap->s_nr_groups;

        for (i = 0; i < nr_groups; i++)
                brelse(bitmap->s_block_bitmap[i]);

        kvfree(bitmap);
}

static void udf_free_partition(struct udf_part_map *map)
{
        int i;
        struct udf_meta_data *mdata;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                iput(map->s_uspace.s_table);
        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                udf_sb_free_bitmap(map->s_uspace.s_bitmap);
        if (map->s_partition_type == UDF_SPARABLE_MAP15)
                for (i = 0; i < 4; i++)
                        brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
        else if (map->s_partition_type == UDF_METADATA_MAP25) {
                mdata = &map->s_type_specific.s_metadata;
                iput(mdata->s_metadata_fe);
                mdata->s_metadata_fe = NULL;

                iput(mdata->s_mirror_fe);
                mdata->s_mirror_fe = NULL;

                iput(mdata->s_bitmap_fe);
                mdata->s_bitmap_fe = NULL;
        }
}

static void udf_sb_free_partitions(struct super_block *sb)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int i;

        if (!sbi->s_partmaps)
                return;
        for (i = 0; i < sbi->s_partitions; i++)
                udf_free_partition(&sbi->s_partmaps[i]);
        kfree(sbi->s_partmaps);
        sbi->s_partmaps = NULL;
}

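/* Report the effective mount options (used e.g. for /proc/mounts). */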
static int udf_show_options(struct seq_file *seq, struct dentry *root)
{
        struct super_block *sb = root->d_sb;
        struct udf_sb_info *sbi = UDF_SB(sb);

        if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
                seq_puts(seq, ",nostrict");
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
                seq_printf(seq, ",bs=%lu", sb->s_blocksize);
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
                seq_puts(seq, ",unhide");
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
                seq_puts(seq, ",undelete");
        if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
                seq_puts(seq, ",noadinicb");
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
                seq_puts(seq, ",shortad");
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
                seq_puts(seq, ",uid=forget");
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
                seq_puts(seq, ",gid=forget");
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
                seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
                seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
        if (sbi->s_umask != 0)
                seq_printf(seq, ",umask=%ho", sbi->s_umask);
        if (sbi->s_fmode != UDF_INVALID_MODE)
                seq_printf(seq, ",mode=%ho", sbi->s_fmode);
        if (sbi->s_dmode != UDF_INVALID_MODE)
                seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
                seq_printf(seq, ",session=%d", sbi->s_session);
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
                seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
        if (sbi->s_anchor != 0)
                seq_printf(seq, ",anchor=%u", sbi->s_anchor);
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
                seq_puts(seq, ",utf8");
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
                seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);

        return 0;
}

/*
 * udf_parse_options
 *
 * PURPOSE
 *      Parse mount options.
 *
 * DESCRIPTION
 *      The following mount options are supported:
 *
 *      gid=            Set the default group.
 *      umask=          Set the default umask.
 *      mode=           Set the default file permissions.
 *      dmode=          Set the default directory permissions.
 *      uid=            Set the default user.
 *      bs=             Set the block size.
 *      unhide          Show otherwise hidden files.
 *      undelete        Show deleted files in lists.
 *      adinicb         Embed data in the inode (default)
 *      noadinicb       Don't embed data in the inode
 *      shortad         Use short ad's
 *      longad          Use long ad's (default)
 *      nostrict        Unset strict conformance
 *      iocharset=      Set the NLS character set
 *
 *      The remaining are for debugging and disaster recovery:
 *
 *      novrs           Skip volume sequence recognition
 *
 *      The following expect an offset from 0.
 *
 *      session=        Set the CDROM session (default= last session)
 *      anchor=         Override standard anchor location. (default= 256)
 *      volume=         Override the VolumeDesc location. (unused)
 *      partition=      Override the PartitionDesc location. (unused)
 *      lastblock=      Set the last block of the filesystem.
 *
 *      The following expect an offset from the partition root.
 *
 *      fileset=        Override the fileset block location. (unused)
 *      rootdir=        Override the root directory location. (unused)
 *              WARNING: overriding the rootdir to a non-directory may
 *              yield highly unpredictable results.
 *
 * PRE-CONDITIONS
 *      options         Pointer to mount options string.
 *      uopts           Pointer to mount options variable.
 *
 * POST-CONDITIONS
 *      <return>        1       Mount options parsed okay.
 *      <return>        0       Error parsing mount options.
 *
 * HISTORY
 *      July 1, 1997 - Andrew E. Mileski
 *      Written, tested, and released.
 */
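/*
 * For example (device and mount point are illustrative only), the options
 * documented above could be combined as:
 *
 *      mount -t udf -o utf8,uid=1000,umask=022 /dev/sr0 /mnt
 */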

enum {
        Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
        Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
        Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
        Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
        Opt_rootdir, Opt_utf8, Opt_iocharset,
        Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
        Opt_fmode, Opt_dmode
};

static const match_table_t tokens = {
        {Opt_novrs,     "novrs"},
        {Opt_nostrict,  "nostrict"},
        {Opt_bs,        "bs=%u"},
        {Opt_unhide,    "unhide"},
        {Opt_undelete,  "undelete"},
        {Opt_noadinicb, "noadinicb"},
        {Opt_adinicb,   "adinicb"},
        {Opt_shortad,   "shortad"},
        {Opt_longad,    "longad"},
        {Opt_uforget,   "uid=forget"},
        {Opt_uignore,   "uid=ignore"},
        {Opt_gforget,   "gid=forget"},
        {Opt_gignore,   "gid=ignore"},
        {Opt_gid,       "gid=%u"},
        {Opt_uid,       "uid=%u"},
        {Opt_umask,     "umask=%o"},
        {Opt_session,   "session=%u"},
        {Opt_lastblock, "lastblock=%u"},
        {Opt_anchor,    "anchor=%u"},
        {Opt_volume,    "volume=%u"},
        {Opt_partition, "partition=%u"},
        {Opt_fileset,   "fileset=%u"},
        {Opt_rootdir,   "rootdir=%u"},
        {Opt_utf8,      "utf8"},
        {Opt_iocharset, "iocharset=%s"},
        {Opt_fmode,     "mode=%o"},
        {Opt_dmode,     "dmode=%o"},
        {Opt_err,       NULL}
};

static int udf_parse_options(char *options, struct udf_options *uopt,
                             bool remount)
{
        char *p;
        int option;
        unsigned int uv;

        uopt->novrs = 0;
        uopt->session = 0xFFFFFFFF;
        uopt->lastblock = 0;
        uopt->anchor = 0;

        if (!options)
                return 1;

        while ((p = strsep(&options, ",")) != NULL) {
                substring_t args[MAX_OPT_ARGS];
                int token;
                unsigned n;
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_novrs:
                        uopt->novrs = 1;
                        break;
                case Opt_bs:
                        if (match_int(&args[0], &option))
                                return 0;
                        n = option;
                        if (n != 512 && n != 1024 && n != 2048 && n != 4096)
                                return 0;
                        uopt->blocksize = n;
                        uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
                        break;
                case Opt_unhide:
                        uopt->flags |= (1 << UDF_FLAG_UNHIDE);
                        break;
                case Opt_undelete:
                        uopt->flags |= (1 << UDF_FLAG_UNDELETE);
                        break;
                case Opt_noadinicb:
                        uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
                        break;
                case Opt_adinicb:
                        uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
                        break;
                case Opt_shortad:
                        uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
                        break;
                case Opt_longad:
                        uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
                        break;
                case Opt_gid:
                        if (match_uint(args, &uv))
                                return 0;
                        uopt->gid = make_kgid(current_user_ns(), uv);
                        if (!gid_valid(uopt->gid))
                                return 0;
                        uopt->flags |= (1 << UDF_FLAG_GID_SET);
                        break;
                case Opt_uid:
                        if (match_uint(args, &uv))
                                return 0;
                        uopt->uid = make_kuid(current_user_ns(), uv);
                        if (!uid_valid(uopt->uid))
                                return 0;
                        uopt->flags |= (1 << UDF_FLAG_UID_SET);
                        break;
                case Opt_umask:
                        if (match_octal(args, &option))
                                return 0;
                        uopt->umask = option;
                        break;
                case Opt_nostrict:
                        uopt->flags &= ~(1 << UDF_FLAG_STRICT);
                        break;
                case Opt_session:
                        if (match_int(args, &option))
                                return 0;
                        uopt->session = option;
                        if (!remount)
                                uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
                        break;
                case Opt_lastblock:
                        if (match_int(args, &option))
                                return 0;
                        uopt->lastblock = option;
                        if (!remount)
                                uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
                        break;
                case Opt_anchor:
                        if (match_int(args, &option))
                                return 0;
                        uopt->anchor = option;
                        break;
                case Opt_volume:
                case Opt_partition:
                case Opt_fileset:
                case Opt_rootdir:
                        /* Ignored (never implemented properly) */
                        break;
                case Opt_utf8:
                        uopt->flags |= (1 << UDF_FLAG_UTF8);
                        break;
                case Opt_iocharset:
                        if (!remount) {
                                if (uopt->nls_map)
                                        unload_nls(uopt->nls_map);
                                /*
                                 * load_nls() failure is handled later in
                                 * udf_fill_super() after all options are
                                 * parsed.
                                 */
                                uopt->nls_map = load_nls(args[0].from);
                                uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
                        }
                        break;
                case Opt_uforget:
                        uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
                        break;
                case Opt_uignore:
                case Opt_gignore:
                        /* These options are superseded by uid=<number> */
                        break;
                case Opt_gforget:
                        uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
                        break;
                case Opt_fmode:
                        if (match_octal(args, &option))
                                return 0;
                        uopt->fmode = option & 0777;
                        break;
                case Opt_dmode:
                        if (match_octal(args, &option))
                                return 0;
                        uopt->dmode = option & 0777;
                        break;
                default:
                        pr_err("bad mount option \"%s\" or missing value\n", p);
                        return 0;
                }
        }
        return 1;
}

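/*
 * Re-parse mount options on remount. A switch to read-write is refused when
 * an incompatibility has already forced this mount read-only; otherwise the
 * Logical Volume Integrity Descriptor is opened or closed to match the new
 * read-only state.
 */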
static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
{
        struct udf_options uopt;
        struct udf_sb_info *sbi = UDF_SB(sb);
        int error = 0;

        if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
                return -EACCES;

        sync_filesystem(sb);

        uopt.flags = sbi->s_flags;
        uopt.uid   = sbi->s_uid;
        uopt.gid   = sbi->s_gid;
        uopt.umask = sbi->s_umask;
        uopt.fmode = sbi->s_fmode;
        uopt.dmode = sbi->s_dmode;
        uopt.nls_map = NULL;

        if (!udf_parse_options(options, &uopt, true))
                return -EINVAL;

        write_lock(&sbi->s_cred_lock);
        sbi->s_flags = uopt.flags;
        sbi->s_uid   = uopt.uid;
        sbi->s_gid   = uopt.gid;
        sbi->s_umask = uopt.umask;
        sbi->s_fmode = uopt.fmode;
        sbi->s_dmode = uopt.dmode;
        write_unlock(&sbi->s_cred_lock);

        if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out_unlock;

        if (*flags & SB_RDONLY)
                udf_close_lvid(sb);
        else
                udf_open_lvid(sb);

out_unlock:
        return error;
}

/*
 * Check VSD descriptor. Returns -1 in case we are at the end of volume
 * recognition area, 0 if the descriptor is valid but non-interesting, 1 if
 * we found one of NSR descriptors we are looking for.
 */
static int identify_vsd(const struct volStructDesc *vsd)
{
        int ret = 0;

        if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
                switch (vsd->structType) {
                case 0:
                        udf_debug("ISO9660 Boot Record found\n");
                        break;
                case 1:
                        udf_debug("ISO9660 Primary Volume Descriptor found\n");
                        break;
                case 2:
                        udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
                        break;
                case 3:
                        udf_debug("ISO9660 Volume Partition Descriptor found\n");
                        break;
                case 255:
                        udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
                        break;
                default:
                        udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
                        break;
                }
        } else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
                ; /* ret = 0 */
        else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
                ret = 1;
        else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
                ret = 1;
        else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN))
                ; /* ret = 0 */
        else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN))
                ; /* ret = 0 */
        else {
                /* TEA01 or invalid id : end of volume recognition area */
                ret = -1;
        }

        return ret;
}

/*
 * Check Volume Structure Descriptors (ECMA 167 2/9.1)
 * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1)
 * @return   1 if NSR02 or NSR03 found,
 *          -1 if first sector read error, 0 otherwise
 */
static int udf_check_vsd(struct super_block *sb)
{
        struct volStructDesc *vsd = NULL;
        loff_t sector = VSD_FIRST_SECTOR_OFFSET;
        int sectorsize;
        struct buffer_head *bh = NULL;
        int nsr = 0;
        struct udf_sb_info *sbi;
        loff_t session_offset;

        sbi = UDF_SB(sb);
        if (sb->s_blocksize < sizeof(struct volStructDesc))
                sectorsize = sizeof(struct volStructDesc);
        else
                sectorsize = sb->s_blocksize;

        session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
        sector += session_offset;

        udf_debug("Starting at sector %u (%lu byte sectors)\n",
                  (unsigned int)(sector >> sb->s_blocksize_bits),
                  sb->s_blocksize);
        /* Process the sequence (if applicable). The hard limit on the sector
         * offset is arbitrary, hopefully large enough so that all valid UDF
         * filesystems will be recognised. There is no mention of an upper
         * bound to the size of the volume recognition area in the standard.
         * The limit prevents the code from reading all the sectors of a
         * specially crafted image (like a bluray disc full of CD001 sectors),
         * potentially causing minutes or even hours of uninterruptible I/O
         * activity. This actually happened with uninitialised SSD partitions
         * (all 0xFF) before the check for the limit and all valid IDs were
         * added. */
        for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) {
                /* Read a block */
                bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
                if (!bh)
                        break;

                vsd = (struct volStructDesc *)(bh->b_data +
                                              (sector & (sb->s_blocksize - 1)));
                nsr = identify_vsd(vsd);
                /* Found NSR or end? */
                if (nsr) {
                        brelse(bh);
                        break;
                }
                /*
                 * Special handling for improperly formatted VRS (e.g., Win10)
                 * where components are separated by 2048 bytes even though
                 * sectors are 4K
                 */
                if (sb->s_blocksize == 4096) {
                        nsr = identify_vsd(vsd + 1);
                        /* Ignore unknown IDs... */
                        if (nsr < 0)
                                nsr = 0;
                }
                brelse(bh);
        }

        if (nsr > 0)
                return 1;
        else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
                return -1;
        else
                return 0;
}

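/*
 * Check the domain identifier of a descriptor: anything that is not OSTA UDF
 * compliant, is flagged dirty, or is hard/soft write protected either fails
 * a read-write mount with -EACCES or marks a read-only mount RW-incompatible.
 */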
static int udf_verify_domain_identifier(struct super_block *sb,
                                        struct regid *ident, char *dname)
{
        struct domainIdentSuffix *suffix;

        if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
                udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
                goto force_ro;
        }
        if (ident->flags & ENTITYID_FLAGS_DIRTY) {
                udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
                         dname);
                goto force_ro;
        }
        suffix = (struct domainIdentSuffix *)ident->identSuffix;
        if ((suffix->domainFlags & DOMAIN_FLAGS_HARD_WRITE_PROTECT) ||
            (suffix->domainFlags & DOMAIN_FLAGS_SOFT_WRITE_PROTECT)) {
                if (!sb_rdonly(sb)) {
                        udf_warn(sb, "Descriptor for %s marked write protected."
                                 " Forcing read only mount.\n", dname);
                }
                goto force_ro;
        }
        return 0;

force_ro:
        if (!sb_rdonly(sb))
                return -EACCES;
        UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
        return 0;
}

static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset,
                            struct kernel_lb_addr *root)
{
        int ret;

        ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set");
        if (ret < 0)
                return ret;

        *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
        UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);

        udf_debug("Rootdir at block=%u, partition=%u\n",
                  root->logicalBlockNum, root->partitionReferenceNum);
        return 0;
}

static int udf_find_fileset(struct super_block *sb,
                            struct kernel_lb_addr *fileset,
                            struct kernel_lb_addr *root)
{
        struct buffer_head *bh = NULL;
        uint16_t ident;
        int ret;

        if (fileset->logicalBlockNum == 0xFFFFFFFF &&
            fileset->partitionReferenceNum == 0xFFFF)
                return -EINVAL;

        bh = udf_read_ptagged(sb, fileset, 0, &ident);
        if (!bh)
                return -EIO;
        if (ident != TAG_IDENT_FSD) {
                brelse(bh);
                return -EINVAL;
        }

        udf_debug("Fileset at block=%u, partition=%u\n",
                  fileset->logicalBlockNum, fileset->partitionReferenceNum);

        UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
        ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root);
        brelse(bh);
        return ret;
}

/*
 * Load primary Volume Descriptor Sequence
 *
 * Return <0 on error, 0 on success. -EAGAIN is special - it means the next
 * sequence should be tried.
 */
static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
{
        struct primaryVolDesc *pvoldesc;
        uint8_t *outstr;
        struct buffer_head *bh;
        uint16_t ident;
        int ret;
        struct timestamp *ts;

        outstr = kmalloc(128, GFP_NOFS);
        if (!outstr)
                return -ENOMEM;

        bh = udf_read_tagged(sb, block, block, &ident);
        if (!bh) {
                ret = -EAGAIN;
                goto out2;
        }

        if (ident != TAG_IDENT_PVD) {
                ret = -EIO;
                goto out_bh;
        }

        pvoldesc = (struct primaryVolDesc *)bh->b_data;

        udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
                              pvoldesc->recordingDateAndTime);
        ts = &pvoldesc->recordingDateAndTime;
        udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
                  le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
                  ts->minute, le16_to_cpu(ts->typeAndTimezone));

        ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
        if (ret < 0) {
                strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
                pr_warn("incorrect volume identification, setting to "
                        "'InvalidName'\n");
        } else {
                strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
        }
        udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);

        ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
        if (ret < 0) {
                ret = 0;
                goto out_bh;
        }
        outstr[ret] = 0;
        udf_debug("volSetIdent[] = '%s'\n", outstr);

        ret = 0;
out_bh:
        brelse(bh);
out2:
        kfree(outstr);
        return ret;
}

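/*
 * Read the metadata file entry at the given location and verify that it uses
 * short allocation descriptors (anything else is rejected with -EIO).
 */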
struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
                                        u32 meta_file_loc, u32 partition_ref)
{
        struct kernel_lb_addr addr;
        struct inode *metadata_fe;

        addr.logicalBlockNum = meta_file_loc;
        addr.partitionReferenceNum = partition_ref;

        metadata_fe = udf_iget_special(sb, &addr);

        if (IS_ERR(metadata_fe)) {
                udf_warn(sb, "metadata inode efe not found\n");
                return metadata_fe;
        }
        if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
                udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
                iput(metadata_fe);
                return ERR_PTR(-EIO);
        }

        return metadata_fe;
}

static int udf_load_metadata_files(struct super_block *sb, int partition,
                                   int type1_index)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *map;
        struct udf_meta_data *mdata;
        struct kernel_lb_addr addr;
        struct inode *fe;

        map = &sbi->s_partmaps[partition];
        mdata = &map->s_type_specific.s_metadata;
        mdata->s_phys_partition_ref = type1_index;

        /* metadata address */
        udf_debug("Metadata file location: block = %u part = %u\n",
                  mdata->s_meta_file_loc, mdata->s_phys_partition_ref);

        fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
                                         mdata->s_phys_partition_ref);
        if (IS_ERR(fe)) {
                /* mirror file entry */
                udf_debug("Mirror metadata file location: block = %u part = %u\n",
                          mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);

                fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
                                                 mdata->s_phys_partition_ref);

                if (IS_ERR(fe)) {
                        udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
                        return PTR_ERR(fe);
                }
                mdata->s_mirror_fe = fe;
        } else
                mdata->s_metadata_fe = fe;


        /*
         * bitmap file entry
         * Note:
         * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
        */
        if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
                addr.logicalBlockNum = mdata->s_bitmap_file_loc;
                addr.partitionReferenceNum = mdata->s_phys_partition_ref;

                udf_debug("Bitmap file location: block = %u part = %u\n",
                          addr.logicalBlockNum, addr.partitionReferenceNum);

                fe = udf_iget_special(sb, &addr);
                if (IS_ERR(fe)) {
                        if (sb_rdonly(sb))
                                udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
                        else {
                                udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
                                return PTR_ERR(fe);
                        }
                } else
                        mdata->s_bitmap_fe = fe;
        }

        udf_debug("udf_load_metadata_files Ok\n");
        return 0;
}

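/*
 * Number of block groups needed for a partition's free space bitmap: the
 * partition length in blocks plus the bits taken by the space bitmap
 * descriptor header, divided by the number of bits per block.
 */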
int udf_compute_nr_groups(struct super_block *sb, u32 partition)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        return DIV_ROUND_UP(map->s_partition_len +
                            (sizeof(struct spaceBitmapDesc) << 3),
                            sb->s_blocksize * 8);
}

static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
{
        struct udf_bitmap *bitmap;
        int nr_groups = udf_compute_nr_groups(sb, index);

        bitmap = kvzalloc(struct_size(bitmap, s_block_bitmap, nr_groups),
                          GFP_KERNEL);
        if (!bitmap)
                return NULL;

        bitmap->s_nr_groups = nr_groups;
        return bitmap;
}

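/*
 * Sanity check a partition descriptor: unsupported access types, a missing
 * partition header descriptor, missing or unsupported allocation info (freed
 * space tables/bitmaps, mixed table and bitmap) and virtual/metadata maps
 * either fail a read-write mount or mark the filesystem RW-incompatible.
 */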
static int check_partition_desc(struct super_block *sb,
                                struct partitionDesc *p,
                                struct udf_part_map *map)
{
        bool umap, utable, fmap, ftable;
        struct partitionHeaderDesc *phd;

        switch (le32_to_cpu(p->accessType)) {
        case PD_ACCESS_TYPE_READ_ONLY:
        case PD_ACCESS_TYPE_WRITE_ONCE:
        case PD_ACCESS_TYPE_NONE:
                goto force_ro;
        }

        /* No Partition Header Descriptor? */
        if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
            strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
                goto force_ro;

        phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
        utable = phd->unallocSpaceTable.extLength;
        umap = phd->unallocSpaceBitmap.extLength;
        ftable = phd->freedSpaceTable.extLength;
        fmap = phd->freedSpaceBitmap.extLength;

        /* No allocation info? */
        if (!utable && !umap && !ftable && !fmap)
                goto force_ro;

        /* We don't support blocks that require erasing before overwrite */
        if (ftable || fmap)
                goto force_ro;
        /* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */
        if (utable && umap)
                goto force_ro;

        if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
            map->s_partition_type == UDF_VIRTUAL_MAP20 ||
            map->s_partition_type == UDF_METADATA_MAP25)
                goto force_ro;

        return 0;
force_ro:
        if (!sb_rdonly(sb))
                return -EACCES;
        UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
        return 0;
}

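/*
 * Fill the in-core partition map from an on-disk partition descriptor and,
 * unless the mount is already known to be RW-incompatible, load the
 * unallocated space table or bitmap described by the partition header.
 */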
static int udf_fill_partdesc_info(struct super_block *sb,
                struct partitionDesc *p, int p_index)
{
        struct udf_part_map *map;
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct partitionHeaderDesc *phd;
        int err;

        map = &sbi->s_partmaps[p_index];

        map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
        map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);

        if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
                map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
        if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
                map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
        if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
                map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
        if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
                map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;

        udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n",
                  p_index, map->s_partition_type,
                  map->s_partition_root, map->s_partition_len);

        err = check_partition_desc(sb, p, map);
        if (err)
                return err;

        /*
         * Skip loading allocation info if we cannot ever write to the fs.
         * This is a correctness thing as we may have decided to force ro mount
         * to avoid allocation info we don't support.
         */
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
                return 0;

        phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
        if (phd->unallocSpaceTable.extLength) {
                struct kernel_lb_addr loc = {
                        .logicalBlockNum = le32_to_cpu(
                                phd->unallocSpaceTable.extPosition),
                        .partitionReferenceNum = p_index,
                };
                struct inode *inode;

                inode = udf_iget_special(sb, &loc);
                if (IS_ERR(inode)) {
                        udf_debug("cannot load unallocSpaceTable (part %d)\n",
                                  p_index);
                        return PTR_ERR(inode);
                }
                map->s_uspace.s_table = inode;
                map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
                udf_debug("unallocSpaceTable (part %d) @ %lu\n",
                          p_index, map->s_uspace.s_table->i_ino);
        }

        if (phd->unallocSpaceBitmap.extLength) {
                struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
                if (!bitmap)
                        return -ENOMEM;
                map->s_uspace.s_bitmap = bitmap;
                bitmap->s_extPosition = le32_to_cpu(
                                phd->unallocSpaceBitmap.extPosition);
                map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
                udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
                          p_index, bitmap->s_extPosition);
        }

        return 0;
}

static void udf_find_vat_block(struct super_block *sb, int p_index,
                               int type1_index, sector_t start_block)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *map = &sbi->s_partmaps[p_index];
        sector_t vat_block;
        struct kernel_lb_addr ino;
        struct inode *inode;

        /*
         * VAT file entry is in the last recorded block. Some broken disks have
         * it a few blocks before so try a bit harder...
         */
        ino.partitionReferenceNum = type1_index;
        for (vat_block = start_block;
             vat_block >= map->s_partition_root &&
             vat_block >= start_block - 3; vat_block--) {
                ino.logicalBlockNum = vat_block - map->s_partition_root;
                inode = udf_iget_special(sb, &ino);
                if (!IS_ERR(inode)) {
                        sbi->s_vat_inode = inode;
                        break;
                }
        }
}

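/*
 * Locate the VAT inode (normally in the last recorded block) and set up the
 * virtual partition map: the start offset and number of entries of the
 * Virtual Allocation Table for UDF 1.50 (MAP15) or 2.0x (MAP20) layouts.
 */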
static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *map = &sbi->s_partmaps[p_index];
        struct buffer_head *bh = NULL;
        struct udf_inode_info *vati;
        uint32_t pos;
        struct virtualAllocationTable20 *vat20;
        sector_t blocks = i_size_read(sb->s_bdev->bd_inode) >>
                          sb->s_blocksize_bits;

        udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
        if (!sbi->s_vat_inode &&
            sbi->s_last_block != blocks - 1) {
                pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
                          (unsigned long)sbi->s_last_block,
                          (unsigned long)blocks - 1);
                udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
        }
        if (!sbi->s_vat_inode)
                return -EIO;

        if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
                map->s_type_specific.s_virtual.s_start_offset = 0;
                map->s_type_specific.s_virtual.s_num_entries =
                        (sbi->s_vat_inode->i_size - 36) >> 2;
        } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
                vati = UDF_I(sbi->s_vat_inode);
                if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
                        pos = udf_block_map(sbi->s_vat_inode, 0);
                        bh = sb_bread(sb, pos);
                        if (!bh)
                                return -EIO;
                        vat20 = (struct virtualAllocationTable20 *)bh->b_data;
                } else {
                        vat20 = (struct virtualAllocationTable20 *)
                                                        vati->i_data;
                }

                map->s_type_specific.s_virtual.s_start_offset =
                        le16_to_cpu(vat20->lengthHeader);
                map->s_type_specific.s_virtual.s_num_entries =
                        (sbi->s_vat_inode->i_size -
                                map->s_type_specific.s_virtual.
                                        s_start_offset) >> 2;
                brelse(bh);
        }
        return 0;
}

/*
 * Load partition descriptor block
 *
 * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
 * sequence.
 */
static int udf_load_partdesc(struct super_block *sb, sector_t block)
{
        struct buffer_head *bh;
        struct partitionDesc *p;
        struct udf_part_map *map;
        struct udf_sb_info *sbi = UDF_SB(sb);
        int i, type1_idx;
        uint16_t partitionNumber;
        uint16_t ident;
        int ret;

        bh = udf_read_tagged(sb, block, block, &ident);
        if (!bh)
                return -EAGAIN;
        if (ident != TAG_IDENT_PD) {
                ret = 0;
                goto out_bh;
        }

        p = (struct partitionDesc *)bh->b_data;
        partitionNumber = le16_to_cpu(p->partitionNumber);

        /* First scan for TYPE1 and SPARABLE partitions */
        for (i = 0; i < sbi->s_partitions; i++) {
                map = &sbi->s_partmaps[i];
                udf_debug("Searching map: (%u == %u)\n",
                          map->s_partition_num, partitionNumber);
                if (map->s_partition_num == partitionNumber &&
                    (map->s_partition_type == UDF_TYPE1_MAP15 ||
                     map->s_partition_type == UDF_SPARABLE_MAP15))
                        break;
        }

        if (i >= sbi->s_partitions) {
                udf_debug("Partition (%u) not found in partition map\n",
                          partitionNumber);
                ret = 0;
                goto out_bh;
        }

        ret = udf_fill_partdesc_info(sb, p, i);
        if (ret < 0)
                goto out_bh;

        /*
         * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
         * PHYSICAL partitions are already set up
         */
        type1_idx = i;
        map = NULL; /* suppress 'maybe used uninitialized' warning */
        for (i = 0; i < sbi->s_partitions; i++) {
                map = &sbi->s_partmaps[i];

                if (map->s_partition_num == partitionNumber &&
                    (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
                     map->s_partition_type == UDF_VIRTUAL_MAP20 ||
                     map->s_partition_type == UDF_METADATA_MAP25))
                        break;
        }

        if (i >= sbi->s_partitions) {
                ret = 0;
                goto out_bh;
        }

        ret = udf_fill_partdesc_info(sb, p, i);
        if (ret < 0)
                goto out_bh;

        if (map->s_partition_type == UDF_METADATA_MAP25) {
                ret = udf_load_metadata_files(sb, i, type1_idx);
                if (ret < 0) {
                        udf_err(sb, "error loading MetaData partition map %d\n",
                                i);
                        goto out_bh;
                }
        } else {
                /*
                 * If we have a partition with virtual map, we don't handle
                 * writing to it (we overwrite blocks instead of relocating
                 * them).
                 */
                if (!sb_rdonly(sb)) {
                        ret = -EACCES;
                        goto out_bh;
                }
                UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
                ret = udf_load_vat(sb, i, type1_idx);
                if (ret < 0)
                        goto out_bh;
        }
        ret = 0;
out_bh:
        /* In case loading failed, we handle cleanup in udf_fill_super */
        brelse(bh);
        return ret;
}

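/*
 * Parse a sparable partition map: validate the packet length and the number
 * and size of sparing tables, then read and keep the valid sparing table
 * blocks for later block remapping.
 */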
static int udf_load_sparable_map(struct super_block *sb,
                                 struct udf_part_map *map,
                                 struct sparablePartitionMap *spm)
{
        uint32_t loc;
        uint16_t ident;
        struct sparingTable *st;
        struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
        int i;
        struct buffer_head *bh;

        map->s_partition_type = UDF_SPARABLE_MAP15;
        sdata->s_packet_len = le16_to_cpu(spm->packetLength);
        if (!is_power_of_2(sdata->s_packet_len)) {
                udf_err(sb, "error loading logical volume descriptor: "
                        "Invalid packet length %u\n",
                        (unsigned)sdata->s_packet_len);
                return -EIO;
        }
        if (spm->numSparingTables > 4) {
                udf_err(sb, "error loading logical volume descriptor: "
                        "Too many sparing tables (%d)\n",
                        (int)spm->numSparingTables);
                return -EIO;
        }
        if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
                udf_err(sb, "error loading logical volume descriptor: "
                        "Too big sparing table size (%u)\n",
                        le32_to_cpu(spm->sizeSparingTable));
                return -EIO;
        }

        for (i = 0; i < spm->numSparingTables; i++) {
                loc = le32_to_cpu(spm->locSparingTable[i]);
                bh = udf_read_tagged(sb, loc, loc, &ident);
                if (!bh)
                        continue;

                st = (struct sparingTable *)bh->b_data;
                if (ident != 0 ||
                    strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
                            strlen(UDF_ID_SPARING)) ||
                    sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
                                                        sb->s_blocksize) {
                        brelse(bh);
                        continue;
                }

                sdata->s_spar_map[i] = bh;
        }
        map->s_partition_func = udf_get_pblock_spar15;
        return 0;
}

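/*
 * Parse the Logical Volume Descriptor: check the partition map table length
 * and the domain identifier, allocate the partition maps and initialise each
 * map according to its type (physical, virtual, sparable or metadata).
 */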
static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                               struct kernel_lb_addr *fileset)
{
        struct logicalVolDesc *lvd;
        int i, offset;
        uint8_t type;
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct genericPartitionMap *gpm;
        uint16_t ident;
        struct buffer_head *bh;
        unsigned int table_len;
        int ret;

        bh = udf_read_tagged(sb, block, block, &ident);
        if (!bh)
                return -EAGAIN;
        BUG_ON(ident != TAG_IDENT_LVD);
        lvd = (struct logicalVolDesc *)bh->b_data;
        table_len = le32_to_cpu(lvd->mapTableLength);
        if (table_len > sb->s_blocksize - sizeof(*lvd)) {
                udf_err(sb, "error loading logical volume descriptor: "
                        "Partition table too long (%u > %lu)\n", table_len,
                        sb->s_blocksize - sizeof(*lvd));
                ret = -EIO;
                goto out_bh;
        }

        ret = udf_verify_domain_identifier(sb, &lvd->domainIdent,
                                           "logical volume");
        if (ret)
                goto out_bh;
        ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
        if (ret)
                goto out_bh;

        for (i = 0, offset = 0;
             i < sbi->s_partitions && offset < table_len;
             i++, offset += gpm->partitionMapLength) {
                struct udf_part_map *map = &sbi->s_partmaps[i];
                gpm = (struct genericPartitionMap *)
                                &(lvd->partitionMaps[offset]);
                type = gpm->partitionMapType;
                if (type == 1) {
                        struct genericPartitionMap1 *gpm1 =
                                (struct genericPartitionMap1 *)gpm;
                        map->s_partition_type = UDF_TYPE1_MAP15;
                        map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
                        map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
                        map->s_partition_func = NULL;
                } else if (type == 2) {
                        struct udfPartitionMap2 *upm2 =
                                                (struct udfPartitionMap2 *)gpm;
                        if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
                                                strlen(UDF_ID_VIRTUAL))) {
                                u16 suf =
                                        le16_to_cpu(((__le16 *)upm2->partIdent.
                                                        identSuffix)[0]);
                                if (suf < 0x0200) {
                                        map->s_partition_type =
                                                        UDF_VIRTUAL_MAP15;
                                        map->s_partition_func =
                                                        udf_get_pblock_virt15;
                                } else {
                                        map->s_partition_type =
                                                        UDF_VIRTUAL_MAP20;
                                        map->s_partition_func =
                                                        udf_get_pblock_virt20;
                                }
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_SPARABLE,
                                                strlen(UDF_ID_SPARABLE))) {
                                ret = udf_load_sparable_map(sb, map,
                                        (struct sparablePartitionMap *)gpm);
                                if (ret < 0)
                                        goto out_bh;
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_METADATA,
                                                strlen(UDF_ID_METADATA))) {
                                struct udf_meta_data *mdata =
                                        &map->s_type_specific.s_metadata;
                                struct metadataPartitionMap *mdm =
                                                (struct metadataPartitionMap *)
                                                &(lvd->partitionMaps[offset]);
                                udf_debug("Parsing Logical vol part %d type %u  id=%s\n",
                                          i, type, UDF_ID_METADATA);

                                map->s_partition_type = UDF_METADATA_MAP25;
                                map->s_partition_func = udf_get_pblock_meta25;

                                mdata->s_meta_file_loc   =
                                        le32_to_cpu(mdm->metadataFileLoc);
                                mdata->s_mirror_file_loc =
1471                                        le32_to_cpu(mdm->metadataMirrorFileLoc);
1472                                mdata->s_bitmap_file_loc =
1473                                        le32_to_cpu(mdm->metadataBitmapFileLoc);
1474                                mdata->s_alloc_unit_size =
1475                                        le32_to_cpu(mdm->allocUnitSize);
1476                                mdata->s_align_unit_size =
1477                                        le16_to_cpu(mdm->alignUnitSize);
1478                                if (mdm->flags & 0x01)
1479                                        mdata->s_flags |= MF_DUPLICATE_MD;
1480
1481                                udf_debug("Metadata Ident suffix=0x%x\n",
1482                                          le16_to_cpu(*(__le16 *)
1483                                                      mdm->partIdent.identSuffix));
1484                                udf_debug("Metadata part num=%u\n",
1485                                          le16_to_cpu(mdm->partitionNum));
1486                                udf_debug("Metadata part alloc unit size=%u\n",
1487                                          le32_to_cpu(mdm->allocUnitSize));
1488                                udf_debug("Metadata file loc=%u\n",
1489                                          le32_to_cpu(mdm->metadataFileLoc));
1490                                udf_debug("Mirror file loc=%u\n",
1491                                          le32_to_cpu(mdm->metadataMirrorFileLoc));
1492                                udf_debug("Bitmap file loc=%u\n",
1493                                          le32_to_cpu(mdm->metadataBitmapFileLoc));
1494                                udf_debug("Flags: %d %u\n",
1495                                          mdata->s_flags, mdm->flags);
1496                        } else {
1497                                udf_debug("Unknown ident: %s\n",
1498                                          upm2->partIdent.ident);
1499                                continue;
1500                        }
1501                        map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1502                        map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1503                }
1504                udf_debug("Partition (%d:%u) type %u on volume %u\n",
1505                          i, map->s_partition_num, type, map->s_volumeseqnum);
1506        }
1507
1508        if (fileset) {
1509                struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
1510
1511                *fileset = lelb_to_cpu(la->extLocation);
1512                udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n",
1513                          fileset->logicalBlockNum,
1514                          fileset->partitionReferenceNum);
1515        }
1516        if (lvd->integritySeqExt.extLength)
1517                udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1518        ret = 0;
1519
1520        if (!sbi->s_lvid_bh) {
1521                /* We can't generate unique IDs without a valid LVID */
1522                if (sb_rdonly(sb)) {
1523                        UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1524                } else {
1525                        udf_warn(sb, "Damaged or missing LVID, forcing "
1526                                     "readonly mount\n");
1527                        ret = -EACCES;
1528                }
1529        }
1530out_bh:
1531        brelse(bh);
1532        return ret;
1533}
1534
1535/*
1536 * Find the prevailing Logical Volume Integrity Descriptor.
1537 */
1538static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1539{
1540        struct buffer_head *bh, *final_bh;
1541        uint16_t ident;
1542        struct udf_sb_info *sbi = UDF_SB(sb);
1543        struct logicalVolIntegrityDesc *lvid;
1544        int indirections = 0;
1545
1546        while (++indirections <= UDF_MAX_LVID_NESTING) {
1547                final_bh = NULL;
1548                while (loc.extLength > 0 &&
1549                        (bh = udf_read_tagged(sb, loc.extLocation,
1550                                        loc.extLocation, &ident))) {
1551                        if (ident != TAG_IDENT_LVID) {
1552                                brelse(bh);
1553                                break;
1554                        }
1555
1556                        brelse(final_bh);
1557                        final_bh = bh;
1558
1559                        loc.extLength -= sb->s_blocksize;
1560                        loc.extLocation++;
1561                }
1562
1563                if (!final_bh)
1564                        return;
1565
1566                brelse(sbi->s_lvid_bh);
1567                sbi->s_lvid_bh = final_bh;
1568
1569                lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
1570                if (lvid->nextIntegrityExt.extLength == 0)
1571                        return;
1572
1573                loc = leea_to_cpu(lvid->nextIntegrityExt);
1574        }
1575
1576        udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
1577                UDF_MAX_LVID_NESTING);
1578        brelse(sbi->s_lvid_bh);
1579        sbi->s_lvid_bh = NULL;
1580}
1581
1582/*
1583 * Step by which the table of partition descriptor records is grown when
1584 * it needs to be reallocated. Must be a power of 2.
1585 */
1586#define PART_DESC_ALLOC_STEP 32
1587
1588struct part_desc_seq_scan_data {
1589        struct udf_vds_record rec;
1590        u32 partnum;
1591};
1592
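/*
 * State for one pass over a Volume Descriptor Sequence: the prevailing
 * record of each descriptor type we care about plus a dynamically sized
 * table with one entry per partition number seen in Partition Descriptors.
 */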
1593struct desc_seq_scan_data {
1594        struct udf_vds_record vds[VDS_POS_LENGTH];
1595        unsigned int size_part_descs;
1596        unsigned int num_part_descs;
1597        struct part_desc_seq_scan_data *part_descs_loc;
1598};
1599
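/*
 * Return the record tracking the prevailing Partition Descriptor for this
 * partition number. Unknown partition numbers get a fresh slot at the end
 * of the table, which is reallocated when it runs out of space.
 */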
1600static struct udf_vds_record *handle_partition_descriptor(
1601                                struct buffer_head *bh,
1602                                struct desc_seq_scan_data *data)
1603{
1604        struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
1605        int partnum;
1606        int i;
1607
1608        partnum = le16_to_cpu(desc->partitionNumber);
1609        for (i = 0; i < data->num_part_descs; i++)
1610                if (partnum == data->part_descs_loc[i].partnum)
1611                        return &(data->part_descs_loc[i].rec);
1612        if (data->num_part_descs >= data->size_part_descs) {
1613                struct part_desc_seq_scan_data *new_loc;
1614                unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
1615
1616                new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
1617                if (!new_loc)
1618                        return ERR_PTR(-ENOMEM);
1619                memcpy(new_loc, data->part_descs_loc,
1620                       data->size_part_descs * sizeof(*new_loc));
1621                kfree(data->part_descs_loc);
1622                data->part_descs_loc = new_loc;
1623                data->size_part_descs = new_size;
1624        }
1625        return &(data->part_descs_loc[data->num_part_descs++].rec);
1626}
1627
1628
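/*
 * Map a descriptor tag identifier onto the udf_vds_record that tracks the
 * prevailing copy of that descriptor; returns NULL for tags we ignore.
 */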
1629static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident,
1630                struct buffer_head *bh, struct desc_seq_scan_data *data)
1631{
1632        switch (ident) {
1633        case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1634                return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]);
1635        case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1636                return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]);
1637        case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1638                return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]);
1639        case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1640                return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]);
1641        case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1642                return handle_partition_descriptor(bh, data);
1643        }
1644        return NULL;
1645}
1646
1647/*
1648 * Process a main/reserve volume descriptor sequence.
1649 *   @block             First block of the first extent of the sequence.
1650 *   @lastblock         Last block of the first extent of the sequence.
1651 *   @fileset           Where we store the extent containing the root fileset.
1652 *
1653 * Returns <0 on error, 0 on success. -EAGAIN is special - try the next
1654 * descriptor sequence.
1655 */
1656static noinline int udf_process_sequence(
1657                struct super_block *sb,
1658                sector_t block, sector_t lastblock,
1659                struct kernel_lb_addr *fileset)
1660{
1661        struct buffer_head *bh = NULL;
1662        struct udf_vds_record *curr;
1663        struct generic_desc *gd;
1664        struct volDescPtr *vdp;
1665        bool done = false;
1666        uint32_t vdsn;
1667        uint16_t ident;
1668        int ret;
1669        unsigned int indirections = 0;
1670        struct desc_seq_scan_data data;
1671        unsigned int i;
1672
1673        memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1674        data.size_part_descs = PART_DESC_ALLOC_STEP;
1675        data.num_part_descs = 0;
1676        data.part_descs_loc = kcalloc(data.size_part_descs,
1677                                      sizeof(*data.part_descs_loc),
1678                                      GFP_KERNEL);
1679        if (!data.part_descs_loc)
1680                return -ENOMEM;
1681
1682        /*
1683         * Read the main descriptor sequence and find which descriptors
1684         * are in it.
1685         */
1686        for (; (!done && block <= lastblock); block++) {
1687                bh = udf_read_tagged(sb, block, block, &ident);
1688                if (!bh)
1689                        break;
1690
1691                /* Process each descriptor (ISO 13346 3/8.3-8.4) */
1692                gd = (struct generic_desc *)bh->b_data;
1693                vdsn = le32_to_cpu(gd->volDescSeqNum);
1694                switch (ident) {
1695                case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1696                        if (++indirections > UDF_MAX_TD_NESTING) {
1697                                udf_err(sb, "too many Volume Descriptor "
1698                                        "Pointers (max %u supported)\n",
1699                                        UDF_MAX_TD_NESTING);
1700                                brelse(bh);
1701                                ret = -EIO;
1702                                goto out;
1703                        }
1704
1705                        vdp = (struct volDescPtr *)bh->b_data;
1706                        block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
1707                        lastblock = le32_to_cpu(
1708                                vdp->nextVolDescSeqExt.extLength) >>
1709                                sb->s_blocksize_bits;
1710                        lastblock += block - 1;
1711                        /* For loop is going to increment 'block' again */
1712                        block--;
1713                        break;
1714                case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1715                case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1716                case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1717                case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1718                case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1719                        curr = get_volume_descriptor_record(ident, bh, &data);
1720                        if (IS_ERR(curr)) {
1721                                brelse(bh);
1722                                ret = PTR_ERR(curr);
1723                                goto out;
1724                        }
1725                        /* Descriptor we don't care about? */
1726                        if (!curr)
1727                                break;
1728                        if (vdsn >= curr->volDescSeqNum) {
1729                                curr->volDescSeqNum = vdsn;
1730                                curr->block = block;
1731                        }
1732                        break;
1733                case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1734                        done = true;
1735                        break;
1736                }
1737                brelse(bh);
1738        }
1739        /*
1740         * Now read interesting descriptors again and process them
1741         * in a suitable order
1742         */
1743        if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1744                udf_err(sb, "Primary Volume Descriptor not found!\n");
1745                ret = -EAGAIN;
1746                goto out;
1747        }
1748        ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
1749        if (ret < 0)
1750                goto out;
1751
1752        if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
1753                ret = udf_load_logicalvol(sb,
1754                                data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
1755                                fileset);
1756                if (ret < 0)
1757                        goto out;
1758        }
1759
1760        /* Now handle prevailing Partition Descriptors */
1761        for (i = 0; i < data.num_part_descs; i++) {
1762                ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
1763                if (ret < 0)
1764                        goto out;
1765        }
1766        ret = 0;
1767out:
1768        kfree(data.part_descs_loc);
1769        return ret;
1770}
1771
1772/*
1773 * Load the Volume Descriptor Sequence described by the anchor in bh.
1774 *
1775 * Returns <0 on error, 0 on success
1776 */
1777static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1778                             struct kernel_lb_addr *fileset)
1779{
1780        struct anchorVolDescPtr *anchor;
1781        sector_t main_s, main_e, reserve_s, reserve_e;
1782        int ret;
1783
1784        anchor = (struct anchorVolDescPtr *)bh->b_data;
1785
1786        /* Locate the main sequence */
1787        main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1788        main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1789        main_e = main_e >> sb->s_blocksize_bits;
1790        main_e += main_s - 1;
1791
1792        /* Locate the reserve sequence */
1793        reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1794        reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1795        reserve_e = reserve_e >> sb->s_blocksize_bits;
1796        reserve_e += reserve_s - 1;
1797
1798        /* Process the main & reserve sequences */
1799        /* responsible for finding the PartitionDesc(s) */
1800        ret = udf_process_sequence(sb, main_s, main_e, fileset);
1801        if (ret != -EAGAIN)
1802                return ret;
1803        udf_sb_free_partitions(sb);
1804        ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1805        if (ret < 0) {
1806                udf_sb_free_partitions(sb);
1807                /* No sequence was OK, return -EIO */
1808                if (ret == -EAGAIN)
1809                        ret = -EIO;
1810        }
1811        return ret;
1812}
1813
1814/*
1815 * Check whether the given block contains an Anchor Volume Descriptor
1816 * Pointer and, if so, load the Volume Descriptor Sequence it describes.
1817 *
1818 * Returns <0 on error, 0 on success. -EAGAIN is special - try the next
1819 * anchor block.
1820 */
1821static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1822                                  struct kernel_lb_addr *fileset)
1823{
1824        struct buffer_head *bh;
1825        uint16_t ident;
1826        int ret;
1827
1828        if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
1829            udf_fixed_to_variable(block) >=
1830            i_size_read(sb->s_bdev->bd_inode) >> sb->s_blocksize_bits)
1831                return -EAGAIN;
1832
1833        bh = udf_read_tagged(sb, block, block, &ident);
1834        if (!bh)
1835                return -EAGAIN;
1836        if (ident != TAG_IDENT_AVDP) {
1837                brelse(bh);
1838                return -EAGAIN;
1839        }
1840        ret = udf_load_sequence(sb, bh, fileset);
1841        brelse(bh);
1842        return ret;
1843}
1844
1845/*
1846 * Search for an anchor volume descriptor pointer.
1847 *
1848 * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1849 * of anchors.
1850 */
1851static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
1852                            struct kernel_lb_addr *fileset)
1853{
1854        sector_t last[6];
1855        int i;
1856        struct udf_sb_info *sbi = UDF_SB(sb);
1857        int last_count = 0;
1858        int ret;
1859
1860        /* First try user provided anchor */
1861        if (sbi->s_anchor) {
1862                ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1863                if (ret != -EAGAIN)
1864                        return ret;
1865        }
1866        /*
1867         * According to the spec, the anchor is in one of:
1868         *     block 256
1869         *     lastblock-256
1870         *     lastblock
1871         * However, if the disc isn't closed, it could also be at block 512.
1872         */
1873        ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1874        if (ret != -EAGAIN)
1875                return ret;
1876        /*
1877         * The trouble is which block is the last one. Drives often misreport
1878         * this so we try various possibilities.
1879         */
1880        last[last_count++] = *lastblock;
1881        if (*lastblock >= 1)
1882                last[last_count++] = *lastblock - 1;
1883        last[last_count++] = *lastblock + 1;
1884        if (*lastblock >= 2)
1885                last[last_count++] = *lastblock - 2;
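        /*
         * The -150 and -152 candidates compensate for drives that include
         * session run-out / lead-out blocks in the size they report - a
         * common quirk of optical media rather than anything the spec
         * mandates.
         */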
1886        if (*lastblock >= 150)
1887                last[last_count++] = *lastblock - 150;
1888        if (*lastblock >= 152)
1889                last[last_count++] = *lastblock - 152;
1890
1891        for (i = 0; i < last_count; i++) {
1892                if (last[i] >= i_size_read(sb->s_bdev->bd_inode) >>
1893                                sb->s_blocksize_bits)
1894                        continue;
1895                ret = udf_check_anchor_block(sb, last[i], fileset);
1896                if (ret != -EAGAIN) {
1897                        if (!ret)
1898                                *lastblock = last[i];
1899                        return ret;
1900                }
1901                if (last[i] < 256)
1902                        continue;
1903                ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1904                if (ret != -EAGAIN) {
1905                        if (!ret)
1906                                *lastblock = last[i];
1907                        return ret;
1908                }
1909        }
1910
1911        /* Finally try block 512 in case media is open */
1912        return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1913}
1914
1915/*
1916 * Find an anchor volume descriptor and load Volume Descriptor Sequence from
1917 * area specified by it. The function expects sbi->s_last_block to be the
1918 * last block on the media.
1919 *
1920 * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor
1921 * was not found.
1922 */
1923static int udf_find_anchor(struct super_block *sb,
1924                           struct kernel_lb_addr *fileset)
1925{
1926        struct udf_sb_info *sbi = UDF_SB(sb);
1927        sector_t lastblock = sbi->s_last_block;
1928        int ret;
1929
1930        ret = udf_scan_anchors(sb, &lastblock, fileset);
1931        if (ret != -EAGAIN)
1932                goto out;
1933
1934        /* No anchor found? Try VARCONV conversion of block numbers */
1935        UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
1936        lastblock = udf_variable_to_fixed(sbi->s_last_block);
1937        /* First, try without converting the number of the last block */
1938        ret = udf_scan_anchors(sb, &lastblock, fileset);
1939        if (ret != -EAGAIN)
1940                goto out;
1941
1942        lastblock = sbi->s_last_block;
1943        /* Second, try with the number of the last block converted */
1944        ret = udf_scan_anchors(sb, &lastblock, fileset);
1945        if (ret < 0) {
1946                /* VARCONV didn't help. Clear it. */
1947                UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
1948        }
1949out:
1950        if (ret == 0)
1951                sbi->s_last_block = lastblock;
1952        return ret;
1953}
1954
1955/*
1956 * Check Volume Structure Descriptor, find Anchor block and load Volume
1957 * Descriptor Sequence.
1958 *
1959 * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
1960 * block was not found.
1961 */
1962static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1963                        int silent, struct kernel_lb_addr *fileset)
1964{
1965        struct udf_sb_info *sbi = UDF_SB(sb);
1966        int nsr = 0;
1967        int ret;
1968
1969        if (!sb_set_blocksize(sb, uopt->blocksize)) {
1970                if (!silent)
1971                        udf_warn(sb, "Bad block size\n");
1972                return -EINVAL;
1973        }
1974        sbi->s_last_block = uopt->lastblock;
1975        if (!uopt->novrs) {
1976                /* Check that it is NSR02 compliant */
1977                nsr = udf_check_vsd(sb);
1978                if (!nsr) {
1979                        if (!silent)
1980                                udf_warn(sb, "No VRS found\n");
1981                        return -EINVAL;
1982                }
1983                if (nsr == -1)
1984                        udf_debug("Failed to read sector at offset %d. "
1985                                  "Assuming open disc. Skipping validity "
1986                                  "check\n", VSD_FIRST_SECTOR_OFFSET);
1987                if (!sbi->s_last_block)
1988                        sbi->s_last_block = udf_get_last_block(sb);
1989        } else {
1990                udf_debug("Validity check skipped because of novrs option\n");
1991        }
1992
1993        /* Look for anchor block and load Volume Descriptor Sequence */
1994        sbi->s_anchor = uopt->anchor;
1995        ret = udf_find_anchor(sb, fileset);
1996        if (ret < 0) {
1997                if (!silent && ret == -EAGAIN)
1998                        udf_warn(sb, "No anchor found\n");
1999                return ret;
2000        }
2001        return 0;
2002}
2003
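/*
 * Refresh the LVID before it is written out: stamp it with the current
 * time and recompute the descriptor CRC and tag checksum.
 */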
2004static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid)
2005{
2006        struct timespec64 ts;
2007
2008        ktime_get_real_ts64(&ts);
2009        udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
2010        lvid->descTag.descCRC = cpu_to_le16(
2011                crc_itu_t(0, (char *)lvid + sizeof(struct tag),
2012                        le16_to_cpu(lvid->descTag.descCRCLength)));
2013        lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
2014}
2015
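/*
 * Mark the volume as open for writing: switch the LVID integrity type from
 * CLOSE to OPEN (or remember that it was already inconsistent), tag the
 * implementation identifier as Linux and push the change to the media.
 */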
2016static void udf_open_lvid(struct super_block *sb)
2017{
2018        struct udf_sb_info *sbi = UDF_SB(sb);
2019        struct buffer_head *bh = sbi->s_lvid_bh;
2020        struct logicalVolIntegrityDesc *lvid;
2021        struct logicalVolIntegrityDescImpUse *lvidiu;
2022
2023        if (!bh)
2024                return;
2025        lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2026        lvidiu = udf_sb_lvidiu(sb);
2027        if (!lvidiu)
2028                return;
2029
2030        mutex_lock(&sbi->s_alloc_mutex);
2031        lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2032        lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2033        if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
2034                lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
2035        else
2036                UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);
2037
2038        udf_finalize_lvid(lvid);
2039        mark_buffer_dirty(bh);
2040        sbi->s_lvid_dirty = 0;
2041        mutex_unlock(&sbi->s_alloc_mutex);
2042        /* Make opening of filesystem visible on the media immediately */
2043        sync_dirty_buffer(bh);
2044}
2045
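/*
 * Mark the volume as cleanly closed on unmount or remount read-only:
 * update the minimum/maximum UDF revision fields and restore the CLOSE
 * integrity type unless the volume was inconsistent to begin with, then
 * push the change to the media.
 */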
2046static void udf_close_lvid(struct super_block *sb)
2047{
2048        struct udf_sb_info *sbi = UDF_SB(sb);
2049        struct buffer_head *bh = sbi->s_lvid_bh;
2050        struct logicalVolIntegrityDesc *lvid;
2051        struct logicalVolIntegrityDescImpUse *lvidiu;
2052
2053        if (!bh)
2054                return;
2055        lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2056        lvidiu = udf_sb_lvidiu(sb);
2057        if (!lvidiu)
2058                return;
2059
2060        mutex_lock(&sbi->s_alloc_mutex);
2061        lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2062        lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2063        if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
2064                lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
2065        if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
2066                lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
2067        if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
2068                lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
2069        if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
2070                lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
2071
2072        /*
2073         * We set buffer uptodate unconditionally here to avoid spurious
2074         * warnings from mark_buffer_dirty() when a previous EIO has marked
2075         * the buffer as !uptodate
2076         */
2077        set_buffer_uptodate(bh);
2078        udf_finalize_lvid(lvid);
2079        mark_buffer_dirty(bh);
2080        sbi->s_lvid_dirty = 0;
2081        mutex_unlock(&sbi->s_alloc_mutex);
2082        /* Make closing of filesystem visible on the media immediately */
2083        sync_dirty_buffer(bh);
2084}
2085
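/*
 * Return the unique ID currently stored in the Logical Volume Header
 * Descriptor and advance the counter; when its lower 32 bits would wrap to
 * zero the counter skips ahead to 16, keeping the low values out of use.
 */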
2086u64 lvid_get_unique_id(struct super_block *sb)
2087{
2088        struct buffer_head *bh;
2089        struct udf_sb_info *sbi = UDF_SB(sb);
2090        struct logicalVolIntegrityDesc *lvid;
2091        struct logicalVolHeaderDesc *lvhd;
2092        u64 uniqueID;
2093        u64 ret;
2094
2095        bh = sbi->s_lvid_bh;
2096        if (!bh)
2097                return 0;
2098
2099        lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2100        lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
2101
2102        mutex_lock(&sbi->s_alloc_mutex);
2103        ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
2104        if (!(++uniqueID & 0xFFFFFFFF))
2105                uniqueID += 16;
2106        lvhd->uniqueID = cpu_to_le64(uniqueID);
2107        udf_updated_lvid(sb);
2108        mutex_unlock(&sbi->s_alloc_mutex);
2109
2110        return ret;
2111}
2112
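/*
 * Fill in the superblock at mount time: parse the mount options, locate
 * the Volume Recognition Sequence and anchor (probing several block sizes
 * if none was specified), load the volume descriptor sequences, enforce
 * the UDF revision limits, open the LVID on read-write mounts and finally
 * read the root directory inode.
 */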
2113static int udf_fill_super(struct super_block *sb, void *options, int silent)
2114{
2115        int ret = -EINVAL;
2116        struct inode *inode = NULL;
2117        struct udf_options uopt;
2118        struct kernel_lb_addr rootdir, fileset;
2119        struct udf_sb_info *sbi;
2120        bool lvid_open = false;
2121
2122        uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
2123        /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
2124        uopt.uid = make_kuid(current_user_ns(), overflowuid);
2125        uopt.gid = make_kgid(current_user_ns(), overflowgid);
2126        uopt.umask = 0;
2127        uopt.fmode = UDF_INVALID_MODE;
2128        uopt.dmode = UDF_INVALID_MODE;
2129        uopt.nls_map = NULL;
2130
2131        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2132        if (!sbi)
2133                return -ENOMEM;
2134
2135        sb->s_fs_info = sbi;
2136
2137        mutex_init(&sbi->s_alloc_mutex);
2138
2139        if (!udf_parse_options((char *)options, &uopt, false))
2140                goto parse_options_failure;
2141
2142        if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
2143            uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
2144                udf_err(sb, "utf8 cannot be combined with iocharset\n");
2145                goto parse_options_failure;
2146        }
2147        if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
2148                uopt.nls_map = load_nls_default();
2149                if (!uopt.nls_map)
2150                        uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
2151                else
2152                        udf_debug("Using default NLS map\n");
2153        }
2154        if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
2155                uopt.flags |= (1 << UDF_FLAG_UTF8);
2156
2157        fileset.logicalBlockNum = 0xFFFFFFFF;
2158        fileset.partitionReferenceNum = 0xFFFF;
2159
2160        sbi->s_flags = uopt.flags;
2161        sbi->s_uid = uopt.uid;
2162        sbi->s_gid = uopt.gid;
2163        sbi->s_umask = uopt.umask;
2164        sbi->s_fmode = uopt.fmode;
2165        sbi->s_dmode = uopt.dmode;
2166        sbi->s_nls_map = uopt.nls_map;
2167        rwlock_init(&sbi->s_cred_lock);
2168
2169        if (uopt.session == 0xFFFFFFFF)
2170                sbi->s_session = udf_get_last_session(sb);
2171        else
2172                sbi->s_session = uopt.session;
2173
2174        udf_debug("Multi-session=%d\n", sbi->s_session);
2175
2176        /* Fill in the rest of the superblock */
2177        sb->s_op = &udf_sb_ops;
2178        sb->s_export_op = &udf_export_ops;
2179
2180        sb->s_magic = UDF_SUPER_MAGIC;
2181        sb->s_time_gran = 1000;
2182
2183        if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
2184                ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2185        } else {
2186                uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
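                /*
                 * No blocksize= option was given: probe, doubling the block
                 * size from the device's logical block size up to 4096
                 * bytes, until udf_load_vrs() succeeds or reports -EACCES.
                 */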
2187                while (uopt.blocksize <= 4096) {
2188                        ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2189                        if (ret < 0) {
2190                                if (!silent && ret != -EACCES) {
2191                                        pr_notice("Scanning with blocksize %u failed\n",
2192                                                  uopt.blocksize);
2193                                }
2194                                brelse(sbi->s_lvid_bh);
2195                                sbi->s_lvid_bh = NULL;
2196                                /*
2197                                 * EACCES is special - we want to propagate to
2198                                 * upper layers that we cannot handle RW mount.
2199                                 */
2200                                if (ret == -EACCES)
2201                                        break;
2202                        } else
2203                                break;
2204
2205                        uopt.blocksize <<= 1;
2206                }
2207        }
2208        if (ret < 0) {
2209                if (ret == -EAGAIN) {
2210                        udf_warn(sb, "No partition found (1)\n");
2211                        ret = -EINVAL;
2212                }
2213                goto error_out;
2214        }
2215
2216        udf_debug("Lastblock=%u\n", sbi->s_last_block);
2217
2218        if (sbi->s_lvid_bh) {
2219                struct logicalVolIntegrityDescImpUse *lvidiu =
2220                                                        udf_sb_lvidiu(sb);
2221                uint16_t minUDFReadRev;
2222                uint16_t minUDFWriteRev;
2223
2224                if (!lvidiu) {
2225                        ret = -EINVAL;
2226                        goto error_out;
2227                }
2228                minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2229                minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2230                if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2231                        udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2232                                minUDFReadRev,
2233                                UDF_MAX_READ_VERSION);
2234                        ret = -EINVAL;
2235                        goto error_out;
2236                } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
2237                        if (!sb_rdonly(sb)) {
2238                                ret = -EACCES;
2239                                goto error_out;
2240                        }
2241                        UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2242                }
2243
2244                sbi->s_udfrev = minUDFWriteRev;
2245
2246                if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
2247                        UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
2248                if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
2249                        UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
2250        }
2251
2252        if (!sbi->s_partitions) {
2253                udf_warn(sb, "No partition found (2)\n");
2254                ret = -EINVAL;
2255                goto error_out;
2256        }
2257
2258        if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2259                        UDF_PART_FLAG_READ_ONLY) {
2260                if (!sb_rdonly(sb)) {
2261                        ret = -EACCES;
2262                        goto error_out;
2263                }
2264                UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2265        }
2266
2267        ret = udf_find_fileset(sb, &fileset, &rootdir);
2268        if (ret < 0) {
2269                udf_warn(sb, "No fileset found\n");
2270                goto error_out;
2271        }
2272
2273        if (!silent) {
2274                struct timestamp ts;
2275                udf_time_to_disk_stamp(&ts, sbi->s_record_time);
2276                udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
2277                         sbi->s_volume_ident,
2278                         le16_to_cpu(ts.year), ts.month, ts.day,
2279                         ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
2280        }
2281        if (!sb_rdonly(sb)) {
2282                udf_open_lvid(sb);
2283                lvid_open = true;
2284        }
2285
2286        /* Assign the root inode */
2287        /* assign inodes by physical block number */
2288        /* perhaps it's not extensible enough, but for now ... */
2289        inode = udf_iget(sb, &rootdir);
2290        if (IS_ERR(inode)) {
2291                udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n",
2292                       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2293                ret = PTR_ERR(inode);
2294                goto error_out;
2295        }
2296
2297        /* Allocate a dentry for the root inode */
2298        sb->s_root = d_make_root(inode);
2299        if (!sb->s_root) {
2300                udf_err(sb, "Couldn't allocate root dentry\n");
2301                ret = -ENOMEM;
2302                goto error_out;
2303        }
2304        sb->s_maxbytes = MAX_LFS_FILESIZE;
2305        sb->s_max_links = UDF_MAX_LINKS;
2306        return 0;
2307
2308error_out:
2309        iput(sbi->s_vat_inode);
2310parse_options_failure:
2311        if (uopt.nls_map)
2312                unload_nls(uopt.nls_map);
2313        if (lvid_open)
2314                udf_close_lvid(sb);
2315        brelse(sbi->s_lvid_bh);
2316        udf_sb_free_partitions(sb);
2317        kfree(sbi);
2318        sb->s_fs_info = NULL;
2319
2320        return ret;
2321}
2322
2323void _udf_err(struct super_block *sb, const char *function,
2324              const char *fmt, ...)
2325{
2326        struct va_format vaf;
2327        va_list args;
2328
2329        va_start(args, fmt);
2330
2331        vaf.fmt = fmt;
2332        vaf.va = &args;
2333
2334        pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2335
2336        va_end(args);
2337}
2338
2339void _udf_warn(struct super_block *sb, const char *function,
2340               const char *fmt, ...)
2341{
2342        struct va_format vaf;
2343        va_list args;
2344
2345        va_start(args, fmt);
2346
2347        vaf.fmt = fmt;
2348        vaf.va = &args;
2349
2350        pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2351
2352        va_end(args);
2353}
2354
2355static void udf_put_super(struct super_block *sb)
2356{
2357        struct udf_sb_info *sbi;
2358
2359        sbi = UDF_SB(sb);
2360
2361        iput(sbi->s_vat_inode);
2362        if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
2363                unload_nls(sbi->s_nls_map);
2364        if (!sb_rdonly(sb))
2365                udf_close_lvid(sb);
2366        brelse(sbi->s_lvid_bh);
2367        udf_sb_free_partitions(sb);
2368        mutex_destroy(&sbi->s_alloc_mutex);
2369        kfree(sb->s_fs_info);
2370        sb->s_fs_info = NULL;
2371}
2372
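/*
 * If the LVID has been updated since the last sync, refresh its timestamp
 * and checksums and mark the buffer dirty; the subsequent block device
 * sync takes care of actually writing it out.
 */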
2373static int udf_sync_fs(struct super_block *sb, int wait)
2374{
2375        struct udf_sb_info *sbi = UDF_SB(sb);
2376
2377        mutex_lock(&sbi->s_alloc_mutex);
2378        if (sbi->s_lvid_dirty) {
2379                struct buffer_head *bh = sbi->s_lvid_bh;
2380                struct logicalVolIntegrityDesc *lvid;
2381
2382                lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2383                udf_finalize_lvid(lvid);
2384
2385                /*
2386                 * Blockdevice will be synced later so we don't have to submit
2387                 * the buffer for IO
2388                 */
2389                mark_buffer_dirty(bh);
2390                sbi->s_lvid_dirty = 0;
2391        }
2392        mutex_unlock(&sbi->s_alloc_mutex);
2393
2394        return 0;
2395}
2396
2397static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2398{
2399        struct super_block *sb = dentry->d_sb;
2400        struct udf_sb_info *sbi = UDF_SB(sb);
2401        struct logicalVolIntegrityDescImpUse *lvidiu;
2402        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2403
2404        lvidiu = udf_sb_lvidiu(sb);
2405        buf->f_type = UDF_SUPER_MAGIC;
2406        buf->f_bsize = sb->s_blocksize;
2407        buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2408        buf->f_bfree = udf_count_free(sb);
2409        buf->f_bavail = buf->f_bfree;
2410        /*
2411         * Let's pretend each free block is also a free 'inode' since UDF does
2412         * not have a separate preallocated table of inodes.
2413         */
2414        buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2415                                          le32_to_cpu(lvidiu->numDirs)) : 0)
2416                        + buf->f_bfree;
2417        buf->f_ffree = buf->f_bfree;
2418        buf->f_namelen = UDF_NAME_LEN;
2419        buf->f_fsid = u64_to_fsid(id);
2420
2421        return 0;
2422}
2423
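/*
 * Count the free blocks recorded in an unallocated space bitmap: read the
 * Space Bitmap Descriptor and add up the set bits across every block the
 * bitmap spans.
 */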
2424static unsigned int udf_count_free_bitmap(struct super_block *sb,
2425                                          struct udf_bitmap *bitmap)
2426{
2427        struct buffer_head *bh = NULL;
2428        unsigned int accum = 0;
2429        int index;
2430        udf_pblk_t block = 0, newblock;
2431        struct kernel_lb_addr loc;
2432        uint32_t bytes;
2433        uint8_t *ptr;
2434        uint16_t ident;
2435        struct spaceBitmapDesc *bm;
2436
2437        loc.logicalBlockNum = bitmap->s_extPosition;
2438        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
2439        bh = udf_read_ptagged(sb, &loc, 0, &ident);
2440
2441        if (!bh) {
2442                udf_err(sb, "udf_count_free failed\n");
2443                goto out;
2444        } else if (ident != TAG_IDENT_SBD) {
2445                brelse(bh);
2446                udf_err(sb, "udf_count_free failed\n");
2447                goto out;
2448        }
2449
2450        bm = (struct spaceBitmapDesc *)bh->b_data;
2451        bytes = le32_to_cpu(bm->numOfBytes);
2452        index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
2453        ptr = (uint8_t *)bh->b_data;
2454
2455        while (bytes > 0) {
2456                u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2457                accum += bitmap_weight((const unsigned long *)(ptr + index),
2458                                        cur_bytes * 8);
2459                bytes -= cur_bytes;
2460                if (bytes) {
2461                        brelse(bh);
2462                        newblock = udf_get_lb_pblock(sb, &loc, ++block);
2463                        bh = udf_tread(sb, newblock);
2464                        if (!bh) {
2465                                udf_debug("read failed\n");
2466                                goto out;
2467                        }
2468                        index = 0;
2469                        ptr = (uint8_t *)bh->b_data;
2470                }
2471        }
2472        brelse(bh);
2473out:
2474        return accum;
2475}
2476
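/*
 * Count the free blocks recorded in an unallocated space table: walk the
 * allocation extents of the table inode and add up their lengths in blocks.
 */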
2477static unsigned int udf_count_free_table(struct super_block *sb,
2478                                         struct inode *table)
2479{
2480        unsigned int accum = 0;
2481        uint32_t elen;
2482        struct kernel_lb_addr eloc;
2483        int8_t etype;
2484        struct extent_position epos;
2485
2486        mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
2487        epos.block = UDF_I(table)->i_location;
2488        epos.offset = sizeof(struct unallocSpaceEntry);
2489        epos.bh = NULL;
2490
2491        while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
2492                accum += (elen >> table->i_sb->s_blocksize_bits);
2493
2494        brelse(epos.bh);
2495        mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
2496
2497        return accum;
2498}
2499
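/*
 * Number of free blocks reported by statfs: VAT-based (append-only) media
 * report zero, otherwise prefer the free space table in the LVID and fall
 * back to counting the unallocated space bitmap or table of the partition.
 */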
2500static unsigned int udf_count_free(struct super_block *sb)
2501{
2502        unsigned int accum = 0;
2503        struct udf_sb_info *sbi = UDF_SB(sb);
2504        struct udf_part_map *map;
2505        unsigned int part = sbi->s_partition;
2506        int ptype = sbi->s_partmaps[part].s_partition_type;
2507
2508        if (ptype == UDF_METADATA_MAP25) {
2509                part = sbi->s_partmaps[part].s_type_specific.s_metadata.
2510                                                        s_phys_partition_ref;
2511        } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
2512                /*
2513                 * Filesystems with VAT are append-only and we cannot write to
2514                 * them. Let's just report 0 here.
2515                 */
2516                return 0;
2517        }
2518
2519        if (sbi->s_lvid_bh) {
2520                struct logicalVolIntegrityDesc *lvid =
2521                        (struct logicalVolIntegrityDesc *)
2522                        sbi->s_lvid_bh->b_data;
2523                if (le32_to_cpu(lvid->numOfPartitions) > part) {
2524                        accum = le32_to_cpu(
2525                                        lvid->freeSpaceTable[part]);
2526                        if (accum == 0xFFFFFFFF)
2527                                accum = 0;
2528                }
2529        }
2530
2531        if (accum)
2532                return accum;
2533
2534        map = &sbi->s_partmaps[part];
2535        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
2536                accum += udf_count_free_bitmap(sb,
2537                                               map->s_uspace.s_bitmap);
2538        }
2539        if (accum)
2540                return accum;
2541
2542        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
2543                accum += udf_count_free_table(sb,
2544                                              map->s_uspace.s_table);
2545        }
2546        return accum;
2547}
2548
2549MODULE_AUTHOR("Ben Fennema");
2550MODULE_DESCRIPTION("Universal Disk Format Filesystem");
2551MODULE_LICENSE("GPL");
2552module_init(init_udf_fs)
2553module_exit(exit_udf_fs)
2554