linux/fs/ext4/xattr.c
   1/*
   2 * linux/fs/ext4/xattr.c
   3 *
   4 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
   5 *
   6 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
   7 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
   8 * Extended attributes for symlinks and special files added per
   9 *  suggestion of Luka Renko <luka.renko@hermes.si>.
  10 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
  11 *  Red Hat Inc.
  12 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
  13 *  and Andreas Gruenbacher <agruen@suse.de>.
  14 */
  15
  16/*
  17 * Extended attributes are stored directly in inodes (on file systems with
  18 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
  19 * field contains the block number if an inode uses an additional block. All
  20 * attributes must fit in the inode and one additional block. Blocks that
  21 * contain the identical set of attributes may be shared among several inodes.
  22 * Identical blocks are detected by keeping a cache of blocks that have
  23 * recently been accessed.
  24 *
   25 * Attributes stored in inodes and attributes stored in blocks use different
   26 * headers; the entries themselves are stored in the same format:
  27 *
  28 *   +------------------+
  29 *   | header           |
  30 *   | entry 1          | |
  31 *   | entry 2          | | growing downwards
  32 *   | entry 3          | v
  33 *   | four null bytes  |
  34 *   | . . .            |
  35 *   | value 1          | ^
  36 *   | value 3          | | growing upwards
  37 *   | value 2          | |
  38 *   +------------------+
  39 *
  40 * The header is followed by multiple entry descriptors. In disk blocks, the
  41 * entry descriptors are kept sorted. In inodes, they are unsorted. The
  42 * attribute values are aligned to the end of the block in no specific order.
  43 *
  44 * Locking strategy
  45 * ----------------
  46 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
  47 * EA blocks are only changed if they are exclusive to an inode, so
  48 * holding xattr_sem also means that nothing but the EA block's reference
  49 * count can change. Multiple writers to the same block are synchronized
  50 * by the buffer lock.
  51 */
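
/*
 * For orientation (a summary only -- the structures are defined in xattr.h):
 * struct ext4_xattr_header / struct ext4_xattr_ibody_header describe a block
 * or the in-inode area, and each struct ext4_xattr_entry records
 * e_name_index, e_name_len and e_name[] to identify an attribute,
 * e_value_offs and e_value_size to locate its value (relative to the start
 * of the block, or to the first in-inode entry), and a hash used when
 * looking for sharable blocks.
 */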
  52
  53#include <linux/init.h>
  54#include <linux/fs.h>
  55#include <linux/slab.h>
  56#include <linux/mbcache.h>
  57#include <linux/quotaops.h>
  58#include "ext4_jbd2.h"
  59#include "ext4.h"
  60#include "xattr.h"
  61#include "acl.h"
  62
  63#ifdef EXT4_XATTR_DEBUG
  64# define ea_idebug(inode, f...) do { \
  65                printk(KERN_DEBUG "inode %s:%lu: ", \
  66                        inode->i_sb->s_id, inode->i_ino); \
  67                printk(f); \
  68                printk("\n"); \
  69        } while (0)
  70# define ea_bdebug(bh, f...) do { \
  71                printk(KERN_DEBUG "block %pg:%lu: ",               \
  72                       bh->b_bdev, (unsigned long) bh->b_blocknr); \
  73                printk(f); \
  74                printk("\n"); \
  75        } while (0)
  76#else
  77# define ea_idebug(inode, fmt, ...)     no_printk(fmt, ##__VA_ARGS__)
  78# define ea_bdebug(bh, fmt, ...)        no_printk(fmt, ##__VA_ARGS__)
  79#endif
  80
  81static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
  82static struct buffer_head *ext4_xattr_cache_find(struct inode *,
  83                                                 struct ext4_xattr_header *,
  84                                                 struct mb_cache_entry **);
  85static void ext4_xattr_rehash(struct ext4_xattr_header *,
  86                              struct ext4_xattr_entry *);
  87static int ext4_xattr_list(struct dentry *dentry, char *buffer,
  88                           size_t buffer_size);
  89
  90static const struct xattr_handler *ext4_xattr_handler_map[] = {
  91        [EXT4_XATTR_INDEX_USER]              = &ext4_xattr_user_handler,
  92#ifdef CONFIG_EXT4_FS_POSIX_ACL
  93        [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
  94        [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
  95#endif
  96        [EXT4_XATTR_INDEX_TRUSTED]           = &ext4_xattr_trusted_handler,
  97#ifdef CONFIG_EXT4_FS_SECURITY
  98        [EXT4_XATTR_INDEX_SECURITY]          = &ext4_xattr_security_handler,
  99#endif
 100};
 101
 102const struct xattr_handler *ext4_xattr_handlers[] = {
 103        &ext4_xattr_user_handler,
 104        &ext4_xattr_trusted_handler,
 105#ifdef CONFIG_EXT4_FS_POSIX_ACL
 106        &posix_acl_access_xattr_handler,
 107        &posix_acl_default_xattr_handler,
 108#endif
 109#ifdef CONFIG_EXT4_FS_SECURITY
 110        &ext4_xattr_security_handler,
 111#endif
 112        NULL
 113};
 114
 115#define EXT4_GET_MB_CACHE(inode)        (((struct ext4_sb_info *) \
 116                                inode->i_sb->s_fs_info)->s_mb_cache)
 117
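/*
 * Compute the checksum of an external xattr block: the filesystem checksum
 * seeded with s_csum_seed, taken over the 64-bit disk block number followed
 * by the whole block with h_checksum temporarily zeroed out.
 */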
 118static __le32 ext4_xattr_block_csum(struct inode *inode,
 119                                    sector_t block_nr,
 120                                    struct ext4_xattr_header *hdr)
 121{
 122        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 123        __u32 csum;
 124        __le32 save_csum;
 125        __le64 dsk_block_nr = cpu_to_le64(block_nr);
 126
 127        save_csum = hdr->h_checksum;
 128        hdr->h_checksum = 0;
 129        csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
 130                           sizeof(dsk_block_nr));
 131        csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
 132                           EXT4_BLOCK_SIZE(inode->i_sb));
 133
 134        hdr->h_checksum = save_csum;
 135        return cpu_to_le32(csum);
 136}
 137
 138static int ext4_xattr_block_csum_verify(struct inode *inode,
 139                                        sector_t block_nr,
 140                                        struct ext4_xattr_header *hdr)
 141{
 142        if (ext4_has_metadata_csum(inode->i_sb) &&
 143            (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
 144                return 0;
 145        return 1;
 146}
 147
 148static void ext4_xattr_block_csum_set(struct inode *inode,
 149                                      sector_t block_nr,
 150                                      struct ext4_xattr_header *hdr)
 151{
 152        if (!ext4_has_metadata_csum(inode->i_sb))
 153                return;
 154
 155        hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
 156}
 157
 158static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
 159                                                struct inode *inode,
 160                                                struct buffer_head *bh)
 161{
 162        ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
 163        return ext4_handle_dirty_metadata(handle, inode, bh);
 164}
 165
 166static inline const struct xattr_handler *
 167ext4_xattr_handler(int name_index)
 168{
 169        const struct xattr_handler *handler = NULL;
 170
 171        if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
 172                handler = ext4_xattr_handler_map[name_index];
 173        return handler;
 174}
 175
 176/*
 177 * Inode operation listxattr()
 178 *
 179 * d_inode(dentry)->i_mutex: don't care
 180 */
 181ssize_t
 182ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
 183{
 184        return ext4_xattr_list(dentry, buffer, size);
 185}
 186
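/*
 * Validate a list of xattr entries: first walk the descriptors to make sure
 * each one (including the terminating marker) lies below 'end', then check
 * that every non-empty value, located relative to 'value_start', falls
 * between the end of the descriptor list and 'end'.
 */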
 187static int
 188ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
 189                       void *value_start)
 190{
 191        struct ext4_xattr_entry *e = entry;
 192
 193        while (!IS_LAST_ENTRY(e)) {
 194                struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
 195                if ((void *)next >= end)
 196                        return -EFSCORRUPTED;
 197                e = next;
 198        }
 199
 200        while (!IS_LAST_ENTRY(entry)) {
 201                if (entry->e_value_size != 0 &&
 202                    (value_start + le16_to_cpu(entry->e_value_offs) <
 203                     (void *)e + sizeof(__u32) ||
 204                     value_start + le16_to_cpu(entry->e_value_offs) +
 205                    le32_to_cpu(entry->e_value_size) > end))
 206                        return -EFSCORRUPTED;
 207                entry = EXT4_XATTR_NEXT(entry);
 208        }
 209
 210        return 0;
 211}
 212
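/*
 * Validate an external xattr block: check the magic number, that h_blocks is
 * 1, the block checksum (when metadata checksums are enabled) and the entry
 * list itself. A successful check is cached in the buffer's verified flag.
 */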
 213static inline int
 214ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 215{
 216        int error;
 217
 218        if (buffer_verified(bh))
 219                return 0;
 220
 221        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 222            BHDR(bh)->h_blocks != cpu_to_le32(1))
 223                return -EFSCORRUPTED;
 224        if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
 225                return -EFSBADCRC;
 226        error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
 227                                       bh->b_data);
 228        if (!error)
 229                set_buffer_verified(bh);
 230        return error;
 231}
 232
 233static int
 234__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
 235                         void *end, const char *function, unsigned int line)
 236{
 237        struct ext4_xattr_entry *entry = IFIRST(header);
 238        int error = -EFSCORRUPTED;
 239
 240        if (((void *) header >= end) ||
 241            (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
 242                goto errout;
 243        error = ext4_xattr_check_names(entry, end, entry);
 244errout:
 245        if (error)
 246                __ext4_error_inode(inode, function, line, 0,
 247                                   "corrupted in-inode xattr");
 248        return error;
 249}
 250
 251#define xattr_check_inode(inode, header, end) \
 252        __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
 253
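/*
 * Check that a single entry's value fits within 'size' bytes of its value
 * area and that it does not claim to live in a separate value block
 * (e_value_block must be zero).
 */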
 254static inline int
 255ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
 256{
 257        size_t value_size = le32_to_cpu(entry->e_value_size);
 258
 259        if (entry->e_value_block != 0 || value_size > size ||
 260            le16_to_cpu(entry->e_value_offs) + value_size > size)
 261                return -EFSCORRUPTED;
 262        return 0;
 263}
 264
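/*
 * Find the named attribute in the entry list at *pentry. On return *pentry
 * points at the matching entry or, for sorted (block) lists, at the position
 * where the name would be inserted. Returns 0 on a match, -ENODATA if the
 * name is absent, or -EFSCORRUPTED if the matching entry is invalid.
 */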
 265static int
 266ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
 267                      const char *name, size_t size, int sorted)
 268{
 269        struct ext4_xattr_entry *entry;
 270        size_t name_len;
 271        int cmp = 1;
 272
 273        if (name == NULL)
 274                return -EINVAL;
 275        name_len = strlen(name);
 276        entry = *pentry;
 277        for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
 278                cmp = name_index - entry->e_name_index;
 279                if (!cmp)
 280                        cmp = name_len - entry->e_name_len;
 281                if (!cmp)
 282                        cmp = memcmp(name, entry->e_name, name_len);
 283                if (cmp <= 0 && (sorted || cmp == 0))
 284                        break;
 285        }
 286        *pentry = entry;
 287        if (!cmp && ext4_xattr_check_entry(entry, size))
 288                return -EFSCORRUPTED;
 289        return cmp ? -ENODATA : 0;
 290}
 291
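/*
 * Look up the named attribute in the inode's external xattr block and, if
 * 'buffer' is non-NULL, copy its value there. Returns the value size on
 * success, -ENODATA if there is no external block or no such attribute,
 * -ERANGE if the buffer is too small, or another negative error.
 */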
 292static int
 293ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
 294                     void *buffer, size_t buffer_size)
 295{
 296        struct buffer_head *bh = NULL;
 297        struct ext4_xattr_entry *entry;
 298        size_t size;
 299        int error;
 300        struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 301
 302        ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
 303                  name_index, name, buffer, (long)buffer_size);
 304
 305        error = -ENODATA;
 306        if (!EXT4_I(inode)->i_file_acl)
 307                goto cleanup;
 308        ea_idebug(inode, "reading block %llu",
 309                  (unsigned long long)EXT4_I(inode)->i_file_acl);
 310        bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
 311        if (!bh)
 312                goto cleanup;
 313        ea_bdebug(bh, "b_count=%d, refcount=%d",
 314                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
 315        if (ext4_xattr_check_block(inode, bh)) {
 316bad_block:
 317                EXT4_ERROR_INODE(inode, "bad block %llu",
 318                                 EXT4_I(inode)->i_file_acl);
 319                error = -EFSCORRUPTED;
 320                goto cleanup;
 321        }
 322        ext4_xattr_cache_insert(ext4_mb_cache, bh);
 323        entry = BFIRST(bh);
 324        error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
 325        if (error == -EFSCORRUPTED)
 326                goto bad_block;
 327        if (error)
 328                goto cleanup;
 329        size = le32_to_cpu(entry->e_value_size);
 330        if (buffer) {
 331                error = -ERANGE;
 332                if (size > buffer_size)
 333                        goto cleanup;
 334                memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
 335                       size);
 336        }
 337        error = size;
 338
 339cleanup:
 340        brelse(bh);
 341        return error;
 342}
 343
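/*
 * Look up the named attribute in the in-inode xattr area and, if 'buffer' is
 * non-NULL, copy its value there. Returns the value size on success,
 * -ENODATA if the inode has no in-body xattrs or the name is absent, or
 * another negative error.
 */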
 344int
 345ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
 346                     void *buffer, size_t buffer_size)
 347{
 348        struct ext4_xattr_ibody_header *header;
 349        struct ext4_xattr_entry *entry;
 350        struct ext4_inode *raw_inode;
 351        struct ext4_iloc iloc;
 352        size_t size;
 353        void *end;
 354        int error;
 355
 356        if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
 357                return -ENODATA;
 358        error = ext4_get_inode_loc(inode, &iloc);
 359        if (error)
 360                return error;
 361        raw_inode = ext4_raw_inode(&iloc);
 362        header = IHDR(inode, raw_inode);
 363        entry = IFIRST(header);
 364        end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
 365        error = xattr_check_inode(inode, header, end);
 366        if (error)
 367                goto cleanup;
 368        error = ext4_xattr_find_entry(&entry, name_index, name,
 369                                      end - (void *)entry, 0);
 370        if (error)
 371                goto cleanup;
 372        size = le32_to_cpu(entry->e_value_size);
 373        if (buffer) {
 374                error = -ERANGE;
 375                if (size > buffer_size)
 376                        goto cleanup;
 377                memcpy(buffer, (void *)IFIRST(header) +
 378                       le16_to_cpu(entry->e_value_offs), size);
 379        }
 380        error = size;
 381
 382cleanup:
 383        brelse(iloc.bh);
 384        return error;
 385}
 386
 387/*
 388 * ext4_xattr_get()
 389 *
   390 * Copy an extended attribute into the buffer provided, or compute the
   391 * buffer size required. Pass a NULL buffer to compute only the size
   392 * that would be required.
 393 *
 394 * Returns a negative error number on failure, or the number of bytes
 395 * used / required on success.
 396 */
 397int
 398ext4_xattr_get(struct inode *inode, int name_index, const char *name,
 399               void *buffer, size_t buffer_size)
 400{
 401        int error;
 402
 403        if (strlen(name) > 255)
 404                return -ERANGE;
 405
 406        down_read(&EXT4_I(inode)->xattr_sem);
 407        error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
 408                                     buffer_size);
 409        if (error == -ENODATA)
 410                error = ext4_xattr_block_get(inode, name_index, name, buffer,
 411                                             buffer_size);
 412        up_read(&EXT4_I(inode)->xattr_sem);
 413        return error;
 414}
 415
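/*
 * Emit a null-terminated name (handler prefix plus entry name) for every
 * entry whose handler agrees to list it into 'buffer' (when non-NULL).
 * Returns the total number of bytes used, or -ERANGE if 'buffer' is too
 * small.
 */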
 416static int
 417ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
 418                        char *buffer, size_t buffer_size)
 419{
 420        size_t rest = buffer_size;
 421
 422        for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
 423                const struct xattr_handler *handler =
 424                        ext4_xattr_handler(entry->e_name_index);
 425
 426                if (handler && (!handler->list || handler->list(dentry))) {
 427                        const char *prefix = handler->prefix ?: handler->name;
 428                        size_t prefix_len = strlen(prefix);
 429                        size_t size = prefix_len + entry->e_name_len + 1;
 430
 431                        if (buffer) {
 432                                if (size > rest)
 433                                        return -ERANGE;
 434                                memcpy(buffer, prefix, prefix_len);
 435                                buffer += prefix_len;
 436                                memcpy(buffer, entry->e_name, entry->e_name_len);
 437                                buffer += entry->e_name_len;
 438                                *buffer++ = 0;
 439                        }
 440                        rest -= size;
 441                }
 442        }
 443        return buffer_size - rest;  /* total size */
 444}
 445
 446static int
 447ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 448{
 449        struct inode *inode = d_inode(dentry);
 450        struct buffer_head *bh = NULL;
 451        int error;
 452        struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 453
 454        ea_idebug(inode, "buffer=%p, buffer_size=%ld",
 455                  buffer, (long)buffer_size);
 456
 457        error = 0;
 458        if (!EXT4_I(inode)->i_file_acl)
 459                goto cleanup;
 460        ea_idebug(inode, "reading block %llu",
 461                  (unsigned long long)EXT4_I(inode)->i_file_acl);
 462        bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
 463        error = -EIO;
 464        if (!bh)
 465                goto cleanup;
 466        ea_bdebug(bh, "b_count=%d, refcount=%d",
 467                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
 468        if (ext4_xattr_check_block(inode, bh)) {
 469                EXT4_ERROR_INODE(inode, "bad block %llu",
 470                                 EXT4_I(inode)->i_file_acl);
 471                error = -EFSCORRUPTED;
 472                goto cleanup;
 473        }
 474        ext4_xattr_cache_insert(ext4_mb_cache, bh);
 475        error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
 476
 477cleanup:
 478        brelse(bh);
 479
 480        return error;
 481}
 482
 483static int
 484ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 485{
 486        struct inode *inode = d_inode(dentry);
 487        struct ext4_xattr_ibody_header *header;
 488        struct ext4_inode *raw_inode;
 489        struct ext4_iloc iloc;
 490        void *end;
 491        int error;
 492
 493        if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
 494                return 0;
 495        error = ext4_get_inode_loc(inode, &iloc);
 496        if (error)
 497                return error;
 498        raw_inode = ext4_raw_inode(&iloc);
 499        header = IHDR(inode, raw_inode);
 500        end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
 501        error = xattr_check_inode(inode, header, end);
 502        if (error)
 503                goto cleanup;
 504        error = ext4_xattr_list_entries(dentry, IFIRST(header),
 505                                        buffer, buffer_size);
 506
 507cleanup:
 508        brelse(iloc.bh);
 509        return error;
 510}
 511
 512/*
 513 * ext4_xattr_list()
 514 *
   515 * Copy a list of attribute names into the buffer provided, or compute
   516 * the buffer size required. Pass a NULL buffer to compute only the
   517 * size that would be required.
 518 *
 519 * Returns a negative error number on failure, or the number of bytes
 520 * used / required on success.
 521 */
 522static int
 523ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 524{
 525        int ret, ret2;
 526
 527        down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
 528        ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
 529        if (ret < 0)
 530                goto errout;
 531        if (buffer) {
 532                buffer += ret;
 533                buffer_size -= ret;
 534        }
 535        ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
 536        if (ret < 0)
 537                goto errout;
 538        ret += ret2;
 539errout:
 540        up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
 541        return ret;
 542}
 543
 544/*
 545 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 546 * not set, set it.
 547 */
 548static void ext4_xattr_update_super_block(handle_t *handle,
 549                                          struct super_block *sb)
 550{
 551        if (ext4_has_feature_xattr(sb))
 552                return;
 553
 554        BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
 555        if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
 556                ext4_set_feature_xattr(sb);
 557                ext4_handle_dirty_super(handle, sb);
 558        }
 559}
 560
 561/*
 562 * Release the xattr block BH: If the reference count is > 1, decrement it;
 563 * otherwise free the block.
 564 */
 565static void
 566ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 567                         struct buffer_head *bh)
 568{
 569        struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 570        u32 hash, ref;
 571        int error = 0;
 572
 573        BUFFER_TRACE(bh, "get_write_access");
 574        error = ext4_journal_get_write_access(handle, bh);
 575        if (error)
 576                goto out;
 577
 578        lock_buffer(bh);
 579        hash = le32_to_cpu(BHDR(bh)->h_hash);
 580        ref = le32_to_cpu(BHDR(bh)->h_refcount);
 581        if (ref == 1) {
 582                ea_bdebug(bh, "refcount now=0; freeing");
 583                /*
 584                 * This must happen under buffer lock for
 585                 * ext4_xattr_block_set() to reliably detect freed block
 586                 */
 587                mb_cache_entry_delete_block(ext4_mb_cache, hash, bh->b_blocknr);
 588                get_bh(bh);
 589                unlock_buffer(bh);
 590                ext4_free_blocks(handle, inode, bh, 0, 1,
 591                                 EXT4_FREE_BLOCKS_METADATA |
 592                                 EXT4_FREE_BLOCKS_FORGET);
 593        } else {
 594                ref--;
 595                BHDR(bh)->h_refcount = cpu_to_le32(ref);
 596                if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
 597                        struct mb_cache_entry *ce;
 598
 599                        ce = mb_cache_entry_get(ext4_mb_cache, hash,
 600                                                bh->b_blocknr);
 601                        if (ce) {
 602                                ce->e_reusable = 1;
 603                                mb_cache_entry_put(ext4_mb_cache, ce);
 604                        }
 605                }
 606
 607                /*
 608                 * Beware of this ugliness: Releasing of xattr block references
 609                 * from different inodes can race and so we have to protect
 610                 * from a race where someone else frees the block (and releases
 611                 * its journal_head) before we are done dirtying the buffer. In
   612 * nojournal mode this race is harmless and we actually cannot
   613 * call ext4_handle_dirty_xattr_block() with the buffer locked, as
   614 * that function can call sync_dirty_buffer(). For that case we
   615 * handle the dirtying after unlocking the buffer.
 616                 */
 617                if (ext4_handle_valid(handle))
 618                        error = ext4_handle_dirty_xattr_block(handle, inode,
 619                                                              bh);
 620                unlock_buffer(bh);
 621                if (!ext4_handle_valid(handle))
 622                        error = ext4_handle_dirty_xattr_block(handle, inode,
 623                                                              bh);
 624                if (IS_SYNC(inode))
 625                        ext4_handle_sync(handle);
 626                dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
 627                ea_bdebug(bh, "refcount now=%d; releasing",
 628                          le32_to_cpu(BHDR(bh)->h_refcount));
 629        }
 630out:
 631        ext4_std_error(inode->i_sb, error);
 632        return;
 633}
 634
 635/*
   636 * Find the available free space for EAs. This also returns, via *total, the
   637 * number of bytes used by the EA entry descriptors.
 638 */
 639static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
 640                                    size_t *min_offs, void *base, int *total)
 641{
 642        for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
 643                if (!last->e_value_block && last->e_value_size) {
 644                        size_t offs = le16_to_cpu(last->e_value_offs);
 645                        if (offs < *min_offs)
 646                                *min_offs = offs;
 647                }
 648                if (total)
 649                        *total += EXT4_XATTR_LEN(last->e_name_len);
 650        }
 651        return (*min_offs - ((void *)last - base) - sizeof(__u32));
 652}
 653
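/*
 * Insert, replace or remove the attribute described by 'i' within the search
 * area described by 's'. Descriptors are packed from the front of the area
 * and values from the back, as in the diagram at the top of this file;
 * returns -ENOSPC if the new name and value would not fit.
 */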
 654static int
 655ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
 656{
 657        struct ext4_xattr_entry *last;
 658        size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
 659
 660        /* Compute min_offs and last. */
 661        last = s->first;
 662        for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
 663                if (!last->e_value_block && last->e_value_size) {
 664                        size_t offs = le16_to_cpu(last->e_value_offs);
 665                        if (offs < min_offs)
 666                                min_offs = offs;
 667                }
 668        }
 669        free = min_offs - ((void *)last - s->base) - sizeof(__u32);
 670        if (!s->not_found) {
 671                if (!s->here->e_value_block && s->here->e_value_size) {
 672                        size_t size = le32_to_cpu(s->here->e_value_size);
 673                        free += EXT4_XATTR_SIZE(size);
 674                }
 675                free += EXT4_XATTR_LEN(name_len);
 676        }
 677        if (i->value) {
 678                if (free < EXT4_XATTR_LEN(name_len) +
 679                           EXT4_XATTR_SIZE(i->value_len))
 680                        return -ENOSPC;
 681        }
 682
 683        if (i->value && s->not_found) {
 684                /* Insert the new name. */
 685                size_t size = EXT4_XATTR_LEN(name_len);
 686                size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
 687                memmove((void *)s->here + size, s->here, rest);
 688                memset(s->here, 0, size);
 689                s->here->e_name_index = i->name_index;
 690                s->here->e_name_len = name_len;
 691                memcpy(s->here->e_name, i->name, name_len);
 692        } else {
 693                if (!s->here->e_value_block && s->here->e_value_size) {
 694                        void *first_val = s->base + min_offs;
 695                        size_t offs = le16_to_cpu(s->here->e_value_offs);
 696                        void *val = s->base + offs;
 697                        size_t size = EXT4_XATTR_SIZE(
 698                                le32_to_cpu(s->here->e_value_size));
 699
 700                        if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) {
 701                                /* The old and the new value have the same
 702                                   size. Just replace. */
 703                                s->here->e_value_size =
 704                                        cpu_to_le32(i->value_len);
 705                                if (i->value == EXT4_ZERO_XATTR_VALUE) {
 706                                        memset(val, 0, size);
 707                                } else {
 708                                        /* Clear pad bytes first. */
 709                                        memset(val + size - EXT4_XATTR_PAD, 0,
 710                                               EXT4_XATTR_PAD);
 711                                        memcpy(val, i->value, i->value_len);
 712                                }
 713                                return 0;
 714                        }
 715
 716                        /* Remove the old value. */
 717                        memmove(first_val + size, first_val, val - first_val);
 718                        memset(first_val, 0, size);
 719                        s->here->e_value_size = 0;
 720                        s->here->e_value_offs = 0;
 721                        min_offs += size;
 722
 723                        /* Adjust all value offsets. */
 724                        last = s->first;
 725                        while (!IS_LAST_ENTRY(last)) {
 726                                size_t o = le16_to_cpu(last->e_value_offs);
 727                                if (!last->e_value_block &&
 728                                    last->e_value_size && o < offs)
 729                                        last->e_value_offs =
 730                                                cpu_to_le16(o + size);
 731                                last = EXT4_XATTR_NEXT(last);
 732                        }
 733                }
 734                if (!i->value) {
 735                        /* Remove the old name. */
 736                        size_t size = EXT4_XATTR_LEN(name_len);
 737                        last = ENTRY((void *)last - size);
 738                        memmove(s->here, (void *)s->here + size,
 739                                (void *)last - (void *)s->here + sizeof(__u32));
 740                        memset(last, 0, size);
 741                }
 742        }
 743
 744        if (i->value) {
 745                /* Insert the new value. */
 746                s->here->e_value_size = cpu_to_le32(i->value_len);
 747                if (i->value_len) {
 748                        size_t size = EXT4_XATTR_SIZE(i->value_len);
 749                        void *val = s->base + min_offs - size;
 750                        s->here->e_value_offs = cpu_to_le16(min_offs - size);
 751                        if (i->value == EXT4_ZERO_XATTR_VALUE) {
 752                                memset(val, 0, size);
 753                        } else {
 754                                /* Clear the pad bytes first. */
 755                                memset(val + size - EXT4_XATTR_PAD, 0,
 756                                       EXT4_XATTR_PAD);
 757                                memcpy(val, i->value, i->value_len);
 758                        }
 759                }
 760        }
 761        return 0;
 762}
 763
 764struct ext4_xattr_block_find {
 765        struct ext4_xattr_search s;
 766        struct buffer_head *bh;
 767};
 768
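/*
 * Read and validate the inode's external xattr block, if any, and point
 * bs->s at it, with bs->s.here at the named attribute. bs->s.not_found is
 * set to -ENODATA when the name is not present.
 */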
 769static int
 770ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
 771                      struct ext4_xattr_block_find *bs)
 772{
 773        struct super_block *sb = inode->i_sb;
 774        int error;
 775
 776        ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
 777                  i->name_index, i->name, i->value, (long)i->value_len);
 778
 779        if (EXT4_I(inode)->i_file_acl) {
 780                /* The inode already has an extended attribute block. */
 781                bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
 782                error = -EIO;
 783                if (!bs->bh)
 784                        goto cleanup;
 785                ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
 786                        atomic_read(&(bs->bh->b_count)),
 787                        le32_to_cpu(BHDR(bs->bh)->h_refcount));
 788                if (ext4_xattr_check_block(inode, bs->bh)) {
 789                        EXT4_ERROR_INODE(inode, "bad block %llu",
 790                                         EXT4_I(inode)->i_file_acl);
 791                        error = -EFSCORRUPTED;
 792                        goto cleanup;
 793                }
 794                /* Find the named attribute. */
 795                bs->s.base = BHDR(bs->bh);
 796                bs->s.first = BFIRST(bs->bh);
 797                bs->s.end = bs->bh->b_data + bs->bh->b_size;
 798                bs->s.here = bs->s.first;
 799                error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
 800                                              i->name, bs->bh->b_size, 1);
 801                if (error && error != -ENODATA)
 802                        goto cleanup;
 803                bs->s.not_found = error;
 804        }
 805        error = 0;
 806
 807cleanup:
 808        return error;
 809}
 810
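/*
 * Apply the change described by 'i' to the external xattr block. A block
 * that is shared with other inodes is cloned first; the result is then
 * either merged with an identical block found in the mbcache, kept in
 * place, or written to a freshly allocated block, and i_file_acl is updated
 * to match.
 */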
 811static int
 812ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 813                     struct ext4_xattr_info *i,
 814                     struct ext4_xattr_block_find *bs)
 815{
 816        struct super_block *sb = inode->i_sb;
 817        struct buffer_head *new_bh = NULL;
 818        struct ext4_xattr_search *s = &bs->s;
 819        struct mb_cache_entry *ce = NULL;
 820        int error = 0;
 821        struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 822
 823#define header(x) ((struct ext4_xattr_header *)(x))
 824
 825        if (i->value && i->value_len > sb->s_blocksize)
 826                return -ENOSPC;
 827        if (s->base) {
 828                BUFFER_TRACE(bs->bh, "get_write_access");
 829                error = ext4_journal_get_write_access(handle, bs->bh);
 830                if (error)
 831                        goto cleanup;
 832                lock_buffer(bs->bh);
 833
 834                if (header(s->base)->h_refcount == cpu_to_le32(1)) {
 835                        __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
 836
 837                        /*
 838                         * This must happen under buffer lock for
 839                         * ext4_xattr_block_set() to reliably detect modified
 840                         * block
 841                         */
 842                        mb_cache_entry_delete_block(ext4_mb_cache, hash,
 843                                                    bs->bh->b_blocknr);
 844                        ea_bdebug(bs->bh, "modifying in-place");
 845                        error = ext4_xattr_set_entry(i, s);
 846                        if (!error) {
 847                                if (!IS_LAST_ENTRY(s->first))
 848                                        ext4_xattr_rehash(header(s->base),
 849                                                          s->here);
 850                                ext4_xattr_cache_insert(ext4_mb_cache,
 851                                        bs->bh);
 852                        }
 853                        unlock_buffer(bs->bh);
 854                        if (error == -EFSCORRUPTED)
 855                                goto bad_block;
 856                        if (!error)
 857                                error = ext4_handle_dirty_xattr_block(handle,
 858                                                                      inode,
 859                                                                      bs->bh);
 860                        if (error)
 861                                goto cleanup;
 862                        goto inserted;
 863                } else {
 864                        int offset = (char *)s->here - bs->bh->b_data;
 865
 866                        unlock_buffer(bs->bh);
 867                        ea_bdebug(bs->bh, "cloning");
 868                        s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
 869                        error = -ENOMEM;
 870                        if (s->base == NULL)
 871                                goto cleanup;
 872                        memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
 873                        s->first = ENTRY(header(s->base)+1);
 874                        header(s->base)->h_refcount = cpu_to_le32(1);
 875                        s->here = ENTRY(s->base + offset);
 876                        s->end = s->base + bs->bh->b_size;
 877                }
 878        } else {
 879                /* Allocate a buffer where we construct the new block. */
 880                s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
 881                /* assert(header == s->base) */
 882                error = -ENOMEM;
 883                if (s->base == NULL)
 884                        goto cleanup;
 885                header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
 886                header(s->base)->h_blocks = cpu_to_le32(1);
 887                header(s->base)->h_refcount = cpu_to_le32(1);
 888                s->first = ENTRY(header(s->base)+1);
 889                s->here = ENTRY(header(s->base)+1);
 890                s->end = s->base + sb->s_blocksize;
 891        }
 892
 893        error = ext4_xattr_set_entry(i, s);
 894        if (error == -EFSCORRUPTED)
 895                goto bad_block;
 896        if (error)
 897                goto cleanup;
 898        if (!IS_LAST_ENTRY(s->first))
 899                ext4_xattr_rehash(header(s->base), s->here);
 900
 901inserted:
 902        if (!IS_LAST_ENTRY(s->first)) {
 903                new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce);
 904                if (new_bh) {
 905                        /* We found an identical block in the cache. */
 906                        if (new_bh == bs->bh)
 907                                ea_bdebug(new_bh, "keeping");
 908                        else {
 909                                u32 ref;
 910
 911                                /* The old block is released after updating
 912                                   the inode. */
 913                                error = dquot_alloc_block(inode,
 914                                                EXT4_C2B(EXT4_SB(sb), 1));
 915                                if (error)
 916                                        goto cleanup;
 917                                BUFFER_TRACE(new_bh, "get_write_access");
 918                                error = ext4_journal_get_write_access(handle,
 919                                                                      new_bh);
 920                                if (error)
 921                                        goto cleanup_dquot;
 922                                lock_buffer(new_bh);
 923                                /*
 924                                 * We have to be careful about races with
 925                                 * freeing, rehashing or adding references to
 926                                 * xattr block. Once we hold buffer lock xattr
 927                                 * block's state is stable so we can check
 928                                 * whether the block got freed / rehashed or
 929                                 * not.  Since we unhash mbcache entry under
 930                                 * buffer lock when freeing / rehashing xattr
 931                                 * block, checking whether entry is still
 932                                 * hashed is reliable. Same rules hold for
 933                                 * e_reusable handling.
 934                                 */
 935                                if (hlist_bl_unhashed(&ce->e_hash_list) ||
 936                                    !ce->e_reusable) {
 937                                        /*
 938                                         * Undo everything and check mbcache
 939                                         * again.
 940                                         */
 941                                        unlock_buffer(new_bh);
 942                                        dquot_free_block(inode,
 943                                                         EXT4_C2B(EXT4_SB(sb),
 944                                                                  1));
 945                                        brelse(new_bh);
 946                                        mb_cache_entry_put(ext4_mb_cache, ce);
 947                                        ce = NULL;
 948                                        new_bh = NULL;
 949                                        goto inserted;
 950                                }
 951                                ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
 952                                BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
 953                                if (ref >= EXT4_XATTR_REFCOUNT_MAX)
 954                                        ce->e_reusable = 0;
 955                                ea_bdebug(new_bh, "reusing; refcount now=%d",
 956                                          ref);
 957                                unlock_buffer(new_bh);
 958                                error = ext4_handle_dirty_xattr_block(handle,
 959                                                                      inode,
 960                                                                      new_bh);
 961                                if (error)
 962                                        goto cleanup_dquot;
 963                        }
 964                        mb_cache_entry_touch(ext4_mb_cache, ce);
 965                        mb_cache_entry_put(ext4_mb_cache, ce);
 966                        ce = NULL;
 967                } else if (bs->bh && s->base == bs->bh->b_data) {
 968                        /* We were modifying this block in-place. */
 969                        ea_bdebug(bs->bh, "keeping this block");
 970                        new_bh = bs->bh;
 971                        get_bh(new_bh);
 972                } else {
 973                        /* We need to allocate a new block */
 974                        ext4_fsblk_t goal, block;
 975
 976                        goal = ext4_group_first_block_no(sb,
 977                                                EXT4_I(inode)->i_block_group);
 978
 979                        /* non-extent files can't have physical blocks past 2^32 */
 980                        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 981                                goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
 982
 983                        block = ext4_new_meta_blocks(handle, inode, goal, 0,
 984                                                     NULL, &error);
 985                        if (error)
 986                                goto cleanup;
 987
 988                        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 989                                BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
 990
 991                        ea_idebug(inode, "creating block %llu",
 992                                  (unsigned long long)block);
 993
 994                        new_bh = sb_getblk(sb, block);
 995                        if (unlikely(!new_bh)) {
 996                                error = -ENOMEM;
 997getblk_failed:
 998                                ext4_free_blocks(handle, inode, NULL, block, 1,
 999                                                 EXT4_FREE_BLOCKS_METADATA);
1000                                goto cleanup;
1001                        }
1002                        lock_buffer(new_bh);
1003                        error = ext4_journal_get_create_access(handle, new_bh);
1004                        if (error) {
1005                                unlock_buffer(new_bh);
1006                                error = -EIO;
1007                                goto getblk_failed;
1008                        }
1009                        memcpy(new_bh->b_data, s->base, new_bh->b_size);
1010                        set_buffer_uptodate(new_bh);
1011                        unlock_buffer(new_bh);
1012                        ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
1013                        error = ext4_handle_dirty_xattr_block(handle,
1014                                                              inode, new_bh);
1015                        if (error)
1016                                goto cleanup;
1017                }
1018        }
1019
1020        /* Update the inode. */
1021        EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
1022
1023        /* Drop the previous xattr block. */
1024        if (bs->bh && bs->bh != new_bh)
1025                ext4_xattr_release_block(handle, inode, bs->bh);
1026        error = 0;
1027
1028cleanup:
1029        if (ce)
1030                mb_cache_entry_put(ext4_mb_cache, ce);
1031        brelse(new_bh);
1032        if (!(bs->bh && s->base == bs->bh->b_data))
1033                kfree(s->base);
1034
1035        return error;
1036
1037cleanup_dquot:
1038        dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
1039        goto cleanup;
1040
1041bad_block:
1042        EXT4_ERROR_INODE(inode, "bad block %llu",
1043                         EXT4_I(inode)->i_file_acl);
1044        goto cleanup;
1045
1046#undef header
1047}
1048
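/*
 * Set up is->s to describe the in-inode xattr area and, if the inode has
 * in-body xattrs, locate the named attribute in it (is->s.not_found is
 * -ENODATA when the name is absent).
 */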
1049int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
1050                          struct ext4_xattr_ibody_find *is)
1051{
1052        struct ext4_xattr_ibody_header *header;
1053        struct ext4_inode *raw_inode;
1054        int error;
1055
1056        if (EXT4_I(inode)->i_extra_isize == 0)
1057                return 0;
1058        raw_inode = ext4_raw_inode(&is->iloc);
1059        header = IHDR(inode, raw_inode);
1060        is->s.base = is->s.first = IFIRST(header);
1061        is->s.here = is->s.first;
1062        is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
1063        if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
1064                error = xattr_check_inode(inode, header, is->s.end);
1065                if (error)
1066                        return error;
1067                /* Find the named attribute. */
1068                error = ext4_xattr_find_entry(&is->s.here, i->name_index,
1069                                              i->name, is->s.end -
1070                                              (void *)is->s.base, 0);
1071                if (error && error != -ENODATA)
1072                        return error;
1073                is->s.not_found = error;
1074        }
1075        return 0;
1076}
1077
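/*
 * Like ext4_xattr_ibody_set(), but if the in-inode area is too small and the
 * inode carries inline data, try to evict the inline data to make room and
 * retry once.
 */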
1078int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
1079                                struct ext4_xattr_info *i,
1080                                struct ext4_xattr_ibody_find *is)
1081{
1082        struct ext4_xattr_ibody_header *header;
1083        struct ext4_xattr_search *s = &is->s;
1084        int error;
1085
1086        if (EXT4_I(inode)->i_extra_isize == 0)
1087                return -ENOSPC;
1088        error = ext4_xattr_set_entry(i, s);
1089        if (error) {
1090                if (error == -ENOSPC &&
1091                    ext4_has_inline_data(inode)) {
1092                        error = ext4_try_to_evict_inline_data(handle, inode,
1093                                        EXT4_XATTR_LEN(strlen(i->name) +
1094                                        EXT4_XATTR_SIZE(i->value_len)));
1095                        if (error)
1096                                return error;
1097                        error = ext4_xattr_ibody_find(inode, i, is);
1098                        if (error)
1099                                return error;
1100                        error = ext4_xattr_set_entry(i, s);
1101                }
1102                if (error)
1103                        return error;
1104        }
1105        header = IHDR(inode, ext4_raw_inode(&is->iloc));
1106        if (!IS_LAST_ENTRY(s->first)) {
1107                header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
1108                ext4_set_inode_state(inode, EXT4_STATE_XATTR);
1109        } else {
1110                header->h_magic = cpu_to_le32(0);
1111                ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
1112        }
1113        return 0;
1114}
1115
1116static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
1117                                struct ext4_xattr_info *i,
1118                                struct ext4_xattr_ibody_find *is)
1119{
1120        struct ext4_xattr_ibody_header *header;
1121        struct ext4_xattr_search *s = &is->s;
1122        int error;
1123
1124        if (EXT4_I(inode)->i_extra_isize == 0)
1125                return -ENOSPC;
1126        error = ext4_xattr_set_entry(i, s);
1127        if (error)
1128                return error;
1129        header = IHDR(inode, ext4_raw_inode(&is->iloc));
1130        if (!IS_LAST_ENTRY(s->first)) {
1131                header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
1132                ext4_set_inode_state(inode, EXT4_STATE_XATTR);
1133        } else {
1134                header->h_magic = cpu_to_le32(0);
1135                ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
1136        }
1137        return 0;
1138}
1139
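/*
 * Return true if the value stored at s->here is identical to the value in
 * 'i'; used to skip writes that would not change anything.
 */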
1140static int ext4_xattr_value_same(struct ext4_xattr_search *s,
1141                                 struct ext4_xattr_info *i)
1142{
1143        void *value;
1144
1145        if (le32_to_cpu(s->here->e_value_size) != i->value_len)
1146                return 0;
1147        value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs);
1148        return !memcmp(value, i->value, i->value_len);
1149}
1150
1151/*
1152 * ext4_xattr_set_handle()
1153 *
 1154 * Create, replace or remove an extended attribute for this inode.  A NULL
 1155 * value removes an existing extended attribute, and a non-NULL value
 1156 * either replaces an existing extended attribute or creates a new one.
 1157 * The flags XATTR_REPLACE and XATTR_CREATE specify that the extended
 1158 * attribute must already exist, or must not yet exist, prior to the
 1159 * call, respectively.
1160 *
1161 * Returns 0, or a negative error number on failure.
1162 */
1163int
1164ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1165                      const char *name, const void *value, size_t value_len,
1166                      int flags)
1167{
1168        struct ext4_xattr_info i = {
1169                .name_index = name_index,
1170                .name = name,
1171                .value = value,
1172                .value_len = value_len,
1173
1174        };
1175        struct ext4_xattr_ibody_find is = {
1176                .s = { .not_found = -ENODATA, },
1177        };
1178        struct ext4_xattr_block_find bs = {
1179                .s = { .not_found = -ENODATA, },
1180        };
1181        unsigned long no_expand;
1182        int error;
1183
1184        if (!name)
1185                return -EINVAL;
1186        if (strlen(name) > 255)
1187                return -ERANGE;
1188        down_write(&EXT4_I(inode)->xattr_sem);
1189        no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
1190        ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
1191
1192        error = ext4_reserve_inode_write(handle, inode, &is.iloc);
1193        if (error)
1194                goto cleanup;
1195
1196        if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
1197                struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
1198                memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
1199                ext4_clear_inode_state(inode, EXT4_STATE_NEW);
1200        }
1201
1202        error = ext4_xattr_ibody_find(inode, &i, &is);
1203        if (error)
1204                goto cleanup;
1205        if (is.s.not_found)
1206                error = ext4_xattr_block_find(inode, &i, &bs);
1207        if (error)
1208                goto cleanup;
1209        if (is.s.not_found && bs.s.not_found) {
1210                error = -ENODATA;
1211                if (flags & XATTR_REPLACE)
1212                        goto cleanup;
1213                error = 0;
1214                if (!value)
1215                        goto cleanup;
1216        } else {
1217                error = -EEXIST;
1218                if (flags & XATTR_CREATE)
1219                        goto cleanup;
1220        }
1221        if (!value) {
1222                if (!is.s.not_found)
1223                        error = ext4_xattr_ibody_set(handle, inode, &i, &is);
1224                else if (!bs.s.not_found)
1225                        error = ext4_xattr_block_set(handle, inode, &i, &bs);
1226        } else {
1227                error = 0;
1228                /* Xattr value did not change? Save us some work and bail out */
1229                if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i))
1230                        goto cleanup;
1231                if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
1232                        goto cleanup;
1233
1234                error = ext4_xattr_ibody_set(handle, inode, &i, &is);
1235                if (!error && !bs.s.not_found) {
1236                        i.value = NULL;
1237                        error = ext4_xattr_block_set(handle, inode, &i, &bs);
1238                } else if (error == -ENOSPC) {
1239                        if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
1240                                error = ext4_xattr_block_find(inode, &i, &bs);
1241                                if (error)
1242                                        goto cleanup;
1243                        }
1244                        error = ext4_xattr_block_set(handle, inode, &i, &bs);
1245                        if (error)
1246                                goto cleanup;
1247                        if (!is.s.not_found) {
1248                                i.value = NULL;
1249                                error = ext4_xattr_ibody_set(handle, inode, &i,
1250                                                             &is);
1251                        }
1252                }
1253        }
1254        if (!error) {
1255                ext4_xattr_update_super_block(handle, inode->i_sb);
1256                inode->i_ctime = ext4_current_time(inode);
1257                if (!value)
1258                        ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
1259                error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
1260                /*
1261                 * The bh is consumed by ext4_mark_iloc_dirty, even with
1262                 * error != 0.
1263                 */
1264                is.iloc.bh = NULL;
1265                if (IS_SYNC(inode))
1266                        ext4_handle_sync(handle);
1267        }
1268
1269cleanup:
1270        brelse(is.iloc.bh);
1271        brelse(bs.bh);
1272        if (no_expand == 0)
1273                ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
1274        up_write(&EXT4_I(inode)->xattr_sem);
1275        return error;
1276}
1277
1278/*
1279 * ext4_xattr_set()
1280 *
1281 * Like ext4_xattr_set_handle, but start from an inode. This extended
1282 * attribute modification is a filesystem transaction by itself.
1283 *
1284 * Returns 0, or a negative error number on failure.
1285 */
1286int
1287ext4_xattr_set(struct inode *inode, int name_index, const char *name,
1288               const void *value, size_t value_len, int flags)
1289{
1290        handle_t *handle;
1291        int error, retries = 0;
1292        int credits = ext4_jbd2_credits_xattr(inode);
1293
1294retry:
1295        handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
1296        if (IS_ERR(handle)) {
1297                error = PTR_ERR(handle);
1298        } else {
1299                int error2;
1300
1301                error = ext4_xattr_set_handle(handle, inode, name_index, name,
1302                                              value, value_len, flags);
1303                error2 = ext4_journal_stop(handle);
1304                if (error == -ENOSPC &&
1305                    ext4_should_retry_alloc(inode->i_sb, &retries))
1306                        goto retry;
1307                if (error == 0)
1308                        error = error2;
1309        }
1310
1311        return error;
1312}
1313
1314/*
1315 * Shift the EA entries in the inode to create space for the increased
1316 * i_extra_isize.
1317 */
1318static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
1319                                     int value_offs_shift, void *to,
1320                                     void *from, size_t n, int blocksize)
1321{
1322        struct ext4_xattr_entry *last = entry;
1323        int new_offs;
1324
1325        /* Adjust the value offsets of the entries */
1326        for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1327                if (!last->e_value_block && last->e_value_size) {
1328                        new_offs = le16_to_cpu(last->e_value_offs) +
1329                                                        value_offs_shift;
1330                        BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
1331                                 > blocksize);
1332                        last->e_value_offs = cpu_to_le16(new_offs);
1333                }
1334        }
1335        /* Shift the entries by n bytes */
1336        memmove(to, from, n);
1337}
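
/*
 * Worked example (numbers invented): growing i_extra_isize from 32 to 64
 * bytes calls this with value_offs_shift = 32 - 64 = -32.  The header and
 * entry descriptors are memmove()d 32 bytes towards the end of the inode,
 * while the values themselves stay put (they are packed against the end of
 * the inode body), so an entry whose e_value_offs was 128 now stores 96.
 */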
1338
1339/*
1340 * Grow i_extra_isize to new_extra_isize bytes when in-inode EAs are present,
1341 * moving attributes to the EA block as needed.  Returns 0 or a negative errno.
1342 */
1343int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
1344                               struct ext4_inode *raw_inode, handle_t *handle)
1345{
1346        struct ext4_xattr_ibody_header *header;
1347        struct ext4_xattr_entry *entry, *last, *first;
1348        struct buffer_head *bh = NULL;
1349        struct ext4_xattr_ibody_find *is = NULL;
1350        struct ext4_xattr_block_find *bs = NULL;
1351        char *buffer = NULL, *b_entry_name = NULL;
1352        size_t min_offs, free;
1353        int total_ino;
1354        void *base, *start, *end;
1355        int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
1356        int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
1357
1358        down_write(&EXT4_I(inode)->xattr_sem);
1359retry:
1360        if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
1361                up_write(&EXT4_I(inode)->xattr_sem);
1362                return 0;
1363        }
1364
1365        header = IHDR(inode, raw_inode);
1366        entry = IFIRST(header);
1367
1368        /*
1369         * Check if enough free space is available in the inode to shift the
1370         * entries ahead by new_extra_isize.
1371         */
1372
1373        base = start = entry;
1374        end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
1375        min_offs = end - base;
1376        last = entry;
1377        total_ino = sizeof(struct ext4_xattr_ibody_header);
1378
1379        error = xattr_check_inode(inode, header, end);
1380        if (error)
1381                goto cleanup;
1382
1383        free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
1384        if (free >= new_extra_isize) {
1385                entry = IFIRST(header);
1386                ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
1387                                - new_extra_isize, (void *)raw_inode +
1388                                EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
1389                                (void *)header, total_ino,
1390                                inode->i_sb->s_blocksize);
1391                EXT4_I(inode)->i_extra_isize = new_extra_isize;
1392                error = 0;
1393                goto cleanup;
1394        }
1395
1396        /*
1397         * Enough free space isn't available in the inode, check if
1398         * EA block can hold new_extra_isize bytes.
1399         */
1400        if (EXT4_I(inode)->i_file_acl) {
1401                bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
1402                error = -EIO;
1403                if (!bh)
1404                        goto cleanup;
1405                if (ext4_xattr_check_block(inode, bh)) {
1406                        EXT4_ERROR_INODE(inode, "bad block %llu",
1407                                         EXT4_I(inode)->i_file_acl);
1408                        error = -EFSCORRUPTED;
1409                        goto cleanup;
1410                }
1411                base = BHDR(bh);
1412                first = BFIRST(bh);
1413                end = bh->b_data + bh->b_size;
1414                min_offs = end - base;
1415                free = ext4_xattr_free_space(first, &min_offs, base, NULL);
1416                if (free < new_extra_isize) {
1417                        if (!tried_min_extra_isize && s_min_extra_isize) {
1418                                tried_min_extra_isize++;
1419                                new_extra_isize = s_min_extra_isize;
1420                                brelse(bh);
1421                                goto retry;
1422                        }
1423                        error = -ENOSPC;
1424                        goto cleanup;
1425                }
1426        } else {
1427                free = inode->i_sb->s_blocksize;
1428        }
1429
1430        while (new_extra_isize > 0) {
1431                size_t offs, size, entry_size;
1432                struct ext4_xattr_entry *small_entry = NULL;
1433                struct ext4_xattr_info i = {
1434                        .value = NULL,
1435                        .value_len = 0,
1436                };
1437                unsigned int total_size;  /* EA entry size + value size */
1438                unsigned int shift_bytes; /* No. of bytes to shift EAs by */
1439                unsigned int min_total_size = ~0U;
1440
1441                is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
1442                bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
1443                if (!is || !bs) {
1444                        error = -ENOMEM;
1445                        goto cleanup;
1446                }
1447
1448                is->s.not_found = -ENODATA;
1449                bs->s.not_found = -ENODATA;
1450                is->iloc.bh = NULL;
1451                bs->bh = NULL;
1452
1453                last = IFIRST(header);
1454                /* Find the entry best suited to be pushed into EA block */
1455                entry = NULL;
1456                for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1457                        total_size =
1458                        EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
1459                                        EXT4_XATTR_LEN(last->e_name_len);
1460                        if (total_size <= free && total_size < min_total_size) {
1461                                if (total_size < new_extra_isize) {
1462                                        small_entry = last;
1463                                } else {
1464                                        entry = last;
1465                                        min_total_size = total_size;
1466                                }
1467                        }
1468                }
1469
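                /*
                 * At this point "entry" is the smallest attribute whose
                 * removal alone frees at least new_extra_isize bytes and
                 * still fits in the EA block; "small_entry" is a fallback
                 * that frees less than that, in which case the outer loop
                 * has to run again.
                 */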
1470                if (entry == NULL) {
1471                        if (small_entry) {
1472                                entry = small_entry;
1473                        } else {
1474                                if (!tried_min_extra_isize &&
1475                                    s_min_extra_isize) {
1476                                        tried_min_extra_isize++;
1477                                        new_extra_isize = s_min_extra_isize;
1478                                        kfree(is); is = NULL;
1479                                        kfree(bs); bs = NULL;
1480                                        brelse(bh);
1481                                        goto retry;
1482                                }
1483                                error = -ENOSPC;
1484                                goto cleanup;
1485                        }
1486                }
1487                offs = le16_to_cpu(entry->e_value_offs);
1488                size = le32_to_cpu(entry->e_value_size);
1489                entry_size = EXT4_XATTR_LEN(entry->e_name_len);
1490                i.name_index = entry->e_name_index;
1491                buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS);
1492                b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
1493                if (!buffer || !b_entry_name) {
1494                        error = -ENOMEM;
1495                        goto cleanup;
1496                }
1497                /* Save the entry name and the entry value */
1498                memcpy(buffer, (void *)IFIRST(header) + offs,
1499                       EXT4_XATTR_SIZE(size));
1500                memcpy(b_entry_name, entry->e_name, entry->e_name_len);
1501                b_entry_name[entry->e_name_len] = '\0';
1502                i.name = b_entry_name;
1503
1504                error = ext4_get_inode_loc(inode, &is->iloc);
1505                if (error)
1506                        goto cleanup;
1507
1508                error = ext4_xattr_ibody_find(inode, &i, is);
1509                if (error)
1510                        goto cleanup;
1511
1512                /* Remove the chosen entry from the inode */
1513                error = ext4_xattr_ibody_set(handle, inode, &i, is);
1514                if (error)
1515                        goto cleanup;
1516
1517                entry = IFIRST(header);
1518                if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
1519                        shift_bytes = new_extra_isize;
1520                else
1521                        shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
1522                /* Adjust the offsets and shift the remaining entries ahead */
1523                ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
1524                        shift_bytes, (void *)raw_inode +
1525                        EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
1526                        (void *)header, total_ino - entry_size,
1527                        inode->i_sb->s_blocksize);
1528
1529                extra_isize += shift_bytes;
1530                new_extra_isize -= shift_bytes;
1531                EXT4_I(inode)->i_extra_isize = extra_isize;
1532
1533                i.name = b_entry_name;
1534                i.value = buffer;
1535                i.value_len = size;
1536                error = ext4_xattr_block_find(inode, &i, bs);
1537                if (error)
1538                        goto cleanup;
1539
1540                /* Add entry which was removed from the inode into the block */
1541                error = ext4_xattr_block_set(handle, inode, &i, bs);
1542                if (error)
1543                        goto cleanup;
1544                kfree(b_entry_name);
1545                kfree(buffer);
1546                b_entry_name = NULL;
1547                buffer = NULL;
1548                brelse(is->iloc.bh);
1549                kfree(is);
1550                kfree(bs);
1551        }
1552        brelse(bh);
1553        up_write(&EXT4_I(inode)->xattr_sem);
1554        return 0;
1555
1556cleanup:
1557        kfree(b_entry_name);
1558        kfree(buffer);
1559        if (is)
1560                brelse(is->iloc.bh);
1561        kfree(is);
1562        kfree(bs);
1563        brelse(bh);
1564        up_write(&EXT4_I(inode)->xattr_sem);
1565        return error;
1566}
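
/*
 * Caller sketch (paraphrased, not verbatim): the usual path into this
 * function is ext4_mark_inode_dirty() -> ext4_expand_extra_isize() in
 * inode.c, which handles the trivial no-EA case itself and only falls
 * through to us when in-inode attributes are present:
 *
 *	if (no in-inode EAs)
 *		// just zero the new space and bump i_extra_isize
 *	else
 *		error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
 *						   raw_inode, handle);
 *
 * The caller must already hold a handle with enough credits to touch the
 * inode and, possibly, the external EA block.
 */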
1567
1570/*
1571 * ext4_xattr_delete_inode()
1572 *
1573 * Free extended attribute resources associated with this inode. This
1574 * is called immediately before an inode is freed. We have exclusive
1575 * access to the inode.
1576 */
1577void
1578ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
1579{
1580        struct buffer_head *bh = NULL;
1581
1582        if (!EXT4_I(inode)->i_file_acl)
1583                goto cleanup;
1584        bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
1585        if (!bh) {
1586                EXT4_ERROR_INODE(inode, "block %llu read error",
1587                                 EXT4_I(inode)->i_file_acl);
1588                goto cleanup;
1589        }
1590        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
1591            BHDR(bh)->h_blocks != cpu_to_le32(1)) {
1592                EXT4_ERROR_INODE(inode, "bad block %llu",
1593                                 EXT4_I(inode)->i_file_acl);
1594                goto cleanup;
1595        }
1596        ext4_xattr_release_block(handle, inode, bh);
1597        EXT4_I(inode)->i_file_acl = 0;
1598
1599cleanup:
1600        brelse(bh);
1601}
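
/*
 * Only the external EA block needs explicit cleanup here; attributes stored
 * in the inode body simply vanish when the inode itself is freed, so they
 * require no extra work.
 */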
1602
1603/*
1604 * ext4_xattr_cache_insert()
1605 *
1606 * Create a new entry in the extended attribute cache, and insert
1607 * it unless such an entry is already in the cache.
1608 *
1609 * Insertion failures are ignored; -EBUSY means the block is already cached.
1610 */
1611static void
1612ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
1613{
1614        struct ext4_xattr_header *header = BHDR(bh);
1615        __u32 hash = le32_to_cpu(header->h_hash);
1616        int reusable = le32_to_cpu(header->h_refcount) <
1617                       EXT4_XATTR_REFCOUNT_MAX;
1618        int error;
1619
1620        error = mb_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash,
1621                                      bh->b_blocknr, reusable);
1622        if (error) {
1623                if (error == -EBUSY)
1624                        ea_bdebug(bh, "already in cache");
1625        } else
1626                ea_bdebug(bh, "inserting [%x]", (int)hash);
1627}
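
/*
 * A note on the key: cache entries are indexed by the header hash and carry
 * the physical block number, and a block is only advertised as "reusable"
 * while its refcount is below EXT4_XATTR_REFCOUNT_MAX, which caps how many
 * inodes may share a single EA block.
 */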
1628
1629/*
1630 * ext4_xattr_cmp()
1631 *
1632 * Compare two extended attribute blocks for equality.
1633 *
1634 * Returns 0 if the blocks are equal, 1 if they differ, and
1635 * a negative error number on errors.
1636 */
1637static int
1638ext4_xattr_cmp(struct ext4_xattr_header *header1,
1639               struct ext4_xattr_header *header2)
1640{
1641        struct ext4_xattr_entry *entry1, *entry2;
1642
1643        entry1 = ENTRY(header1+1);
1644        entry2 = ENTRY(header2+1);
1645        while (!IS_LAST_ENTRY(entry1)) {
1646                if (IS_LAST_ENTRY(entry2))
1647                        return 1;
1648                if (entry1->e_hash != entry2->e_hash ||
1649                    entry1->e_name_index != entry2->e_name_index ||
1650                    entry1->e_name_len != entry2->e_name_len ||
1651                    entry1->e_value_size != entry2->e_value_size ||
1652                    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
1653                        return 1;
1654                if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
1655                        return -EFSCORRUPTED;
1656                if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
1657                           (char *)header2 + le16_to_cpu(entry2->e_value_offs),
1658                           le32_to_cpu(entry1->e_value_size)))
1659                        return 1;
1660
1661                entry1 = EXT4_XATTR_NEXT(entry1);
1662                entry2 = EXT4_XATTR_NEXT(entry2);
1663        }
1664        if (!IS_LAST_ENTRY(entry2))
1665                return 1;
1666        return 0;
1667}
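
/*
 * The comparison is byte-exact on the stored little-endian fields, so no
 * byte swapping is needed, and it bails out with 1 at the first mismatch.
 * e_value_block must be zero on both sides: this code never stores values
 * in a separate block, so a nonzero value means on-disk corruption.
 */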
1668
1669/*
1670 * ext4_xattr_cache_find()
1671 *
1672 * Find an identical extended attribute block.
1673 *
1674 * Returns a pointer to the block found, or NULL if such a block was
1675 * not found or an error occurred.
1676 */
1677static struct buffer_head *
1678ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
1679                      struct mb_cache_entry **pce)
1680{
1681        __u32 hash = le32_to_cpu(header->h_hash);
1682        struct mb_cache_entry *ce;
1683        struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
1684
1685        if (!header->h_hash)
1686                return NULL;  /* never share */
1687        ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
1688        ce = mb_cache_entry_find_first(ext4_mb_cache, hash);
1689        while (ce) {
1690                struct buffer_head *bh;
1691
1692                bh = sb_bread(inode->i_sb, ce->e_block);
1693                if (!bh) {
1694                        EXT4_ERROR_INODE(inode, "block %lu read error",
1695                                         (unsigned long) ce->e_block);
1696                } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
1697                        *pce = ce;
1698                        return bh;
1699                }
1700                brelse(bh);
1701                ce = mb_cache_entry_find_next(ext4_mb_cache, ce);
1702        }
1703        return NULL;
1704}
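
/*
 * Illustrative use (paraphrased from the block-set path earlier in this
 * file): when writing an EA block, its header is hashed and looked up here;
 * on a match the existing block is shared instead of allocating a new one:
 *
 *	new_bh = ext4_xattr_cache_find(inode, header, &ce);
 *	if (new_bh) {
 *		// bump h_refcount on the cached block (under its buffer
 *		// lock) and point i_file_acl at new_bh->b_blocknr
 *	} else {
 *		// no identical block is cached: allocate a fresh one
 *	}
 */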
1705
1706#define NAME_HASH_SHIFT 5
1707#define VALUE_HASH_SHIFT 16
1708
1709/*
1710 * ext4_xattr_hash_entry()
1711 *
1712 * Compute the hash of an extended attribute.
1713 */
1714static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
1715                                         struct ext4_xattr_entry *entry)
1716{
1717        __u32 hash = 0;
1718        char *name = entry->e_name;
1719        int n;
1720
1721        for (n = 0; n < entry->e_name_len; n++) {
1722                hash = (hash << NAME_HASH_SHIFT) ^
1723                       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
1724                       *name++;
1725        }
1726
1727        if (entry->e_value_block == 0 && entry->e_value_size != 0) {
1728                __le32 *value = (__le32 *)((char *)header +
1729                        le16_to_cpu(entry->e_value_offs));
1730                for (n = (le32_to_cpu(entry->e_value_size) +
1731                     EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
1732                        hash = (hash << VALUE_HASH_SHIFT) ^
1733                               (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
1734                               le32_to_cpu(*value++);
1735                }
1736        }
1737        entry->e_hash = cpu_to_le32(hash);
1738}
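
/*
 * Self-contained userspace sketch of the same entry hash, handy when
 * inspecting on-disk images by hand.  The helper name is invented for this
 * example; "value" must be zero-padded to a multiple of four bytes, exactly
 * as it is stored on disk, and is consumed as little-endian 32-bit words:
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static uint32_t demo_xattr_entry_hash(const char *name, size_t name_len,
 *					      const unsigned char *value,
 *					      size_t value_size)
 *	{
 *		uint32_t hash = 0;
 *		size_t n;
 *
 *		// NAME_HASH_SHIFT == 5; plain char, as above, so high-bit
 *		// name bytes sign-extend on most ABIs
 *		for (n = 0; n < name_len; n++)
 *			hash = (hash << 5) ^ (hash >> 27) ^ name[n];
 *		// VALUE_HASH_SHIFT == 16; (value_size + 3) / 4 padded words
 *		for (n = 0; n < (value_size + 3) / 4; n++) {
 *			uint32_t w = value[4 * n] |
 *				     value[4 * n + 1] << 8 |
 *				     value[4 * n + 2] << 16 |
 *				     (uint32_t)value[4 * n + 3] << 24;
 *			hash = (hash << 16) ^ (hash >> 16) ^ w;
 *		}
 *		return hash;
 *	}
 */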
1739
1740#undef NAME_HASH_SHIFT
1741#undef VALUE_HASH_SHIFT
1742
1743#define BLOCK_HASH_SHIFT 16
1744
1745/*
1746 * ext4_xattr_rehash()
1747 *
1748 * Re-compute the extended attribute hash value after an entry has changed.
1749 */
1750static void ext4_xattr_rehash(struct ext4_xattr_header *header,
1751                              struct ext4_xattr_entry *entry)
1752{
1753        struct ext4_xattr_entry *here;
1754        __u32 hash = 0;
1755
1756        ext4_xattr_hash_entry(header, entry);
1757        here = ENTRY(header+1);
1758        while (!IS_LAST_ENTRY(here)) {
1759                if (!here->e_hash) {
1760                        /* Block is not shared if an entry's hash value == 0 */
1761                        hash = 0;
1762                        break;
1763                }
1764                hash = (hash << BLOCK_HASH_SHIFT) ^
1765                       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
1766                       le32_to_cpu(here->e_hash);
1767                here = EXT4_XATTR_NEXT(here);
1768        }
1769        header->h_hash = cpu_to_le32(hash);
1770}
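
/*
 * Example of the fold (entry hashes invented): with two entries whose
 * e_hash values are 0x1 and 0x2, the block hash becomes
 * ((0 << 16) ^ (0 >> 16)) ^ 0x1 = 0x1, then ((0x1 << 16) ^ (0x1 >> 16)) ^
 * 0x2 = 0x10002.  Any entry with e_hash == 0 forces h_hash to 0, which in
 * turn makes ext4_xattr_cache_find() above refuse to share the block.
 */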
1771
1772#undef BLOCK_HASH_SHIFT
1773
1774#define HASH_BUCKET_BITS        10
1775
1776struct mb_cache *
1777ext4_xattr_create_cache(void)
1778{
1779        return mb_cache_create(HASH_BUCKET_BITS);
1780}
1781
1782void ext4_xattr_destroy_cache(struct mb_cache *cache)
1783{
1784        if (cache)
1785                mb_cache_destroy(cache);
1786}
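
/*
 * Lifecycle sketch (hedged: the s_mb_cache field is what this tree's
 * EXT4_GET_MB_CACHE() resolves to; the call sites are paraphrased):
 *
 *	// at mount:          sbi->s_mb_cache = ext4_xattr_create_cache();
 *	// at unmount:        ext4_xattr_destroy_cache(sbi->s_mb_cache);
 *	//                    sbi->s_mb_cache = NULL;
 */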
1787
1788