linux/fs/ntfs3/attrib.c
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * External NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP can be defined
 * to tune the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

// 16M
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
// 16G
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))

static inline u64 get_pre_allocated(u64 size)
{
        u32 clump;
        u8 align_shift;
        u64 ret;

        if (size <= NTFS_CLUMP_MIN) {
                clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
                align_shift = NTFS_MIN_LOG2_OF_CLUMP;
        } else if (size >= NTFS_CLUMP_MAX) {
                clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
                align_shift = NTFS_MAX_LOG2_OF_CLUMP;
        } else {
                align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
                              __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
                clump = 1u << align_shift;
        }

        ret = (((size + clump - 1) >> align_shift)) << align_shift;

        return ret;
}
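
/*
 * Worked example for get_pre_allocated() (illustrative only, assuming
 * the default clump bounds above): for size = 1 GiB + 1 the middle
 * branch is taken, size >> 24 == 0x40, so
 *
 *	align_shift = 15 + __ffs(0x40) = 15 + 6 = 21, clump = 2 MiB,
 *	get_pre_allocated(0x40000001) == 0x40200000
 *
 * i.e. sizes in the gigabyte range are preallocated in 2 MiB steps.
 */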

/*
 * attr_must_be_resident
 *
 * Return: True if attribute must be resident.
 */
static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
                                         enum ATTR_TYPE type)
{
        const struct ATTR_DEF_ENTRY *de;

        switch (type) {
        case ATTR_STD:
        case ATTR_NAME:
        case ATTR_ID:
        case ATTR_LABEL:
        case ATTR_VOL_INFO:
        case ATTR_ROOT:
        case ATTR_EA_INFO:
                return true;
        default:
                de = ntfs_query_def(sbi, type);
                if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
                        return true;
                return false;
        }
}
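
/*
 * Usage sketch (hypothetical caller, not taken from this file): a
 * conversion path could guard itself with this predicate before
 * making an attribute nonresident:
 *
 *	if (attr_must_be_resident(sbi, attr->type))
 *		return -EINVAL;
 */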

/*
 * attr_load_runs - Load all runs stored in @attr.
 */
int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
                   struct runs_tree *run, const CLST *vcn)
{
        int err;
        CLST svcn = le64_to_cpu(attr->nres.svcn);
        CLST evcn = le64_to_cpu(attr->nres.evcn);
        u32 asize;
        u16 run_off;

        if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
                return 0;

        if (vcn && (evcn < *vcn || *vcn < svcn))
                return -EINVAL;

        asize = le32_to_cpu(attr->size);
        run_off = le16_to_cpu(attr->nres.run_off);
        err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
                            vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
                            asize - run_off);
        if (err < 0)
                return err;

        return 0;
}
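
/*
 * Typical call pattern (a sketch based on the callers below): map the
 * runs of a segment before querying it, e.g.:
 *
 *	err = attr_load_runs(attr, ni, run, NULL);
 *	if (!err)
 *		ok = run_lookup_entry(run, vcn, &lcn, &clen, NULL);
 */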

/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
                             CLST vcn, CLST len, CLST *done, bool trim)
{
        int err = 0;
        CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
        size_t idx;

        if (!len)
                goto out;

        if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
                run_truncate(run, vcn0);
                err = -EINVAL;
                goto out;
        }

        for (;;) {
                if (clen > len)
                        clen = len;

                if (!clen) {
                        err = -EINVAL;
                        goto out;
                }

                if (lcn != SPARSE_LCN) {
                        mark_as_free_ex(sbi, lcn, clen, trim);
                        dn += clen;
                }

                len -= clen;
                if (!len)
                        break;

                vcn_next = vcn + clen;
                if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
                    vcn != vcn_next) {
                        /* Save memory - don't load entire run. */
                        goto failed;
                }
        }

out:
        if (done)
                *done += dn;

        return err;
}
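
/*
 * Note on @trim (behavior as used in this file): shrinking paths such
 * as attr_set_size() pass trim=true so the freed clusters may be
 * discarded by the device, while undo paths such as
 * attr_allocate_clusters() pass trim=false to skip the discard.
 */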

/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
                           CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
                           enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
                           CLST *new_lcn)
{
        int err;
        CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
        struct wnd_bitmap *wnd = &sbi->used.bitmap;
        size_t cnt = run->count;

        for (;;) {
                err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
                                               opt);

                if (err == -ENOSPC && pre) {
                        pre = 0;
                        if (*pre_alloc)
                                *pre_alloc = 0;
                        continue;
                }

                if (err)
                        goto out;

                if (new_lcn && vcn == vcn0)
                        *new_lcn = lcn;

                /* Add new fragment into run storage. */
                if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
                        /* Undo last 'ntfs_look_for_free_space' */
                        down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
                        wnd_set_free(wnd, lcn, flen);
                        up_write(&wnd->rw_lock);
                        err = -ENOMEM;
                        goto out;
                }

                vcn += flen;

                if (flen >= len || opt == ALLOCATE_MFT ||
                    (fr && run->count - cnt >= fr)) {
                        *alen = vcn - vcn0;
                        return 0;
                }

                len -= flen;
        }

out:
        /* Undo 'ntfs_look_for_free_space' */
        if (vcn - vcn0) {
                run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
                run_truncate(run, vcn0);
        }

        return err;
}
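
/*
 * Example of the @fr (max fragments) argument, as computed by the
 * callers later in this file: the number of new fragments is bounded
 * by the free space left in the MFT record, assuming ~3 bytes per
 * packed fragment:
 *
 *	fr = (sbi->record_size - le32_to_cpu(rec->used) + 8) / 3 + 1;
 *	err = attr_allocate_clusters(sbi, run, vcn, lcn, len, &pre_alloc,
 *				     ALLOCATE_DEF, &alen, fr, NULL);
 */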

/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
                          struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
                          u64 new_size, struct runs_tree *run,
                          struct ATTRIB **ins_attr, struct page *page)
{
        struct ntfs_sb_info *sbi;
        struct ATTRIB *attr_s;
        struct MFT_REC *rec;
        u32 used, asize, rsize, aoff, align;
        bool is_data;
        CLST len, alen;
        char *next;
        int err;

        if (attr->non_res) {
                *ins_attr = attr;
                return 0;
        }

        sbi = mi->sbi;
        rec = mi->mrec;
        attr_s = NULL;
        used = le32_to_cpu(rec->used);
        asize = le32_to_cpu(attr->size);
        next = Add2Ptr(attr, asize);
        aoff = PtrOffset(rec, attr);
        rsize = le32_to_cpu(attr->res.data_size);
        is_data = attr->type == ATTR_DATA && !attr->name_len;

        align = sbi->cluster_size;
        if (is_attr_compressed(attr))
                align <<= COMPRESSION_UNIT;
        len = (rsize + align - 1) >> sbi->cluster_bits;

        run_init(run);

        /* Make a copy of original attribute. */
        attr_s = kmemdup(attr, asize, GFP_NOFS);
        if (!attr_s) {
                err = -ENOMEM;
                goto out;
        }

        if (!len) {
                /* Empty resident -> Empty nonresident. */
                alen = 0;
        } else {
                const char *data = resident_data(attr);

                err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
                                             ALLOCATE_DEF, &alen, 0, NULL);
                if (err)
                        goto out1;

                if (!rsize) {
                        /* Empty resident -> Non empty nonresident. */
                } else if (!is_data) {
                        err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
                        if (err)
                                goto out2;
                } else if (!page) {
                        char *kaddr;

                        page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
                        if (!page) {
                                err = -ENOMEM;
                                goto out2;
                        }
                        kaddr = kmap_atomic(page);
                        memcpy(kaddr, data, rsize);
                        memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
                        kunmap_atomic(kaddr);
                        flush_dcache_page(page);
                        SetPageUptodate(page);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }

        /* Remove original attribute. */
        used -= asize;
        memmove(attr, Add2Ptr(attr, asize), used - aoff);
        rec->used = cpu_to_le32(used);
        mi->dirty = true;
        if (le)
                al_remove_le(ni, le);

        err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
                                    attr_s->name_len, run, 0, alen,
                                    attr_s->flags, &attr, NULL);
        if (err)
                goto out3;

        kfree(attr_s);
        attr->nres.data_size = cpu_to_le64(rsize);
        attr->nres.valid_size = attr->nres.data_size;

        *ins_attr = attr;

        if (is_data)
                ni->ni_flags &= ~NI_FLAG_RESIDENT;

        /* Resident attribute becomes nonresident. */
        return 0;

out3:
        attr = Add2Ptr(rec, aoff);
        memmove(next, attr, used - aoff);
        memcpy(attr, attr_s, asize);
        rec->used = cpu_to_le32(used + asize);
        mi->dirty = true;
out2:
        /* Undo: do not trim newly allocated clusters. */
        run_deallocate(sbi, run, false);
        run_close(run);
out1:
        kfree(attr_s);
out:
        return err;
}
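
/*
 * Caller sketch (mirrors attr_set_size_res() below): a resident
 * attribute is converted once it can no longer fit in the MFT record:
 *
 *	if (used + dsize > sbi->max_bytes_per_attr)
 *		return attr_make_nonresident(ni, attr, le, mi, new_size,
 *					     run, ins_attr, NULL);
 */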

/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
                             struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
                             u64 new_size, struct runs_tree *run,
                             struct ATTRIB **ins_attr)
{
        struct ntfs_sb_info *sbi = mi->sbi;
        struct MFT_REC *rec = mi->mrec;
        u32 used = le32_to_cpu(rec->used);
        u32 asize = le32_to_cpu(attr->size);
        u32 aoff = PtrOffset(rec, attr);
        u32 rsize = le32_to_cpu(attr->res.data_size);
        u32 tail = used - aoff - asize;
        char *next = Add2Ptr(attr, asize);
        s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

        if (dsize < 0) {
                memmove(next + dsize, next, tail);
        } else if (dsize > 0) {
                if (used + dsize > sbi->max_bytes_per_attr)
                        return attr_make_nonresident(ni, attr, le, mi, new_size,
                                                     run, ins_attr, NULL);

                memmove(next + dsize, next, tail);
                memset(next, 0, dsize);
        }

        if (new_size > rsize)
                memset(Add2Ptr(resident_data(attr), rsize), 0,
                       new_size - rsize);

        rec->used = cpu_to_le32(used + dsize);
        attr->size = cpu_to_le32(asize + dsize);
        attr->res.data_size = cpu_to_le32(new_size);
        mi->dirty = true;
        *ins_attr = attr;

        return 0;
}
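
/*
 * Resizing arithmetic example (illustrative): resident payloads are
 * kept 8-byte aligned inside the record, so growing rsize = 20 to
 * new_size = 30 shifts the record tail by
 *
 *	dsize = ALIGN(30, 8) - ALIGN(20, 8) = 32 - 24 = 8 bytes,
 *
 * while a change within the same 8-byte slot (e.g. 20 -> 22) gives
 * dsize = 0 and only res.data_size is updated (plus zeroing of the
 * newly exposed bytes).
 */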

/*
 * attr_set_size - Change the size of an attribute.
 *
 * Extend:
 *   - Sparse/compressed: No clusters are allocated.
 *   - Normal: Append newly allocated and preallocated clusters.
 * Shrink:
 *   - Nothing is deallocated if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
                  const __le16 *name, u8 name_len, struct runs_tree *run,
                  u64 new_size, const u64 *new_valid, bool keep_prealloc,
                  struct ATTRIB **ret)
{
        int err = 0;
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        u8 cluster_bits = sbi->cluster_bits;
        bool is_mft =
                ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
        u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
        struct ATTRIB *attr = NULL, *attr_b;
        struct ATTR_LIST_ENTRY *le, *le_b;
        struct mft_inode *mi, *mi_b;
        CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
        CLST next_svcn, pre_alloc = -1, done = 0;
        bool is_ext;
        u32 align;
        struct MFT_REC *rec;

again:
        le_b = NULL;
        attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
                              &mi_b);
        if (!attr_b) {
                err = -ENOENT;
                goto out;
        }

        if (!attr_b->non_res) {
                err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
                                        &attr_b);
                if (err || !attr_b->non_res)
                        goto out;

                /* Layout of records may be changed, so do a full search. */
                goto again;
        }

        is_ext = is_attr_ext(attr_b);

again_1:
        align = sbi->cluster_size;

        if (is_ext)
                align <<= attr_b->nres.c_unit;

        old_valid = le64_to_cpu(attr_b->nres.valid_size);
        old_size = le64_to_cpu(attr_b->nres.data_size);
        old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
        old_alen = old_alloc >> cluster_bits;

        new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
        new_alen = new_alloc >> cluster_bits;

        if (keep_prealloc && new_size < old_size) {
                attr_b->nres.data_size = cpu_to_le64(new_size);
                mi_b->dirty = true;
                goto ok;
        }

        vcn = old_alen - 1;

        svcn = le64_to_cpu(attr_b->nres.svcn);
        evcn = le64_to_cpu(attr_b->nres.evcn);

        if (svcn <= vcn && vcn <= evcn) {
                attr = attr_b;
                le = le_b;
                mi = mi_b;
        } else if (!le_b) {
                err = -EINVAL;
                goto out;
        } else {
                le = le_b;
                attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
                                    &mi);
                if (!attr) {
                        err = -EINVAL;
                        goto out;
                }

next_le_1:
                svcn = le64_to_cpu(attr->nres.svcn);
                evcn = le64_to_cpu(attr->nres.evcn);
        }

next_le:
        rec = mi->mrec;

        err = attr_load_runs(attr, ni, run, NULL);
        if (err)
                goto out;

        if (new_size > old_size) {
                CLST to_allocate;
                size_t free;

                if (new_alloc <= old_alloc) {
                        attr_b->nres.data_size = cpu_to_le64(new_size);
                        mi_b->dirty = true;
                        goto ok;
                }

                to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
                lcn = 0;
                if (is_mft) {
                        /* MFT allocates clusters from MFT zone. */
                        pre_alloc = 0;
                } else if (is_ext) {
                        /* No preallocation for sparse/compressed attributes. */
                        pre_alloc = 0;
                } else if (pre_alloc == -1) {
                        pre_alloc = 0;
                        if (type == ATTR_DATA && !name_len &&
                            sbi->options->prealloc) {
                                CLST new_alen2 = bytes_to_cluster(
                                        sbi, get_pre_allocated(new_size));
                                pre_alloc = new_alen2 - new_alen;
                        }

                        /* Get the last LCN to allocate from. */
                        if (old_alen &&
                            !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
                                lcn = SPARSE_LCN;
                        }

                        if (lcn == SPARSE_LCN)
                                lcn = 0;
                        else if (lcn)
                                lcn += 1;

                        free = wnd_zeroes(&sbi->used.bitmap);
                        if (to_allocate > free) {
                                err = -ENOSPC;
                                goto out;
                        }

                        if (pre_alloc && to_allocate + pre_alloc > free)
                                pre_alloc = 0;
                }

                vcn = old_alen;

                if (is_ext) {
                        if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
                                           false)) {
                                err = -ENOMEM;
                                goto out;
                        }
                        alen = to_allocate;
                } else {
                        /* ~3 bytes per fragment. */
                        err = attr_allocate_clusters(
                                sbi, run, vcn, lcn, to_allocate, &pre_alloc,
                                is_mft ? ALLOCATE_MFT : 0, &alen,
                                is_mft ? 0
                                       : (sbi->record_size -
                                          le32_to_cpu(rec->used) + 8) /
                                                         3 +
                                                 1,
                                NULL);
                        if (err)
                                goto out;
                }

                done += alen;
                vcn += alen;
                if (to_allocate > alen)
                        to_allocate -= alen;
                else
                        to_allocate = 0;

pack_runs:
                err = mi_pack_runs(mi, attr, run, vcn - svcn);
                if (err)
                        goto out;

                next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
                new_alloc_tmp = (u64)next_svcn << cluster_bits;
                attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
                mi_b->dirty = true;

                if (next_svcn >= vcn && !to_allocate) {
                        /* Normal way. Update attribute and exit. */
                        attr_b->nres.data_size = cpu_to_le64(new_size);
                        goto ok;
                }

                /* At least two MFT records, to avoid a recursive loop. */
                if (is_mft && next_svcn == vcn &&
                    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
                        new_size = new_alloc_tmp;
                        attr_b->nres.data_size = attr_b->nres.alloc_size;
                        goto ok;
                }

                if (le32_to_cpu(rec->used) < sbi->record_size) {
                        old_alen = next_svcn;
                        evcn = old_alen - 1;
                        goto add_alloc_in_same_attr_seg;
                }

                attr_b->nres.data_size = attr_b->nres.alloc_size;
                if (new_alloc_tmp < old_valid)
                        attr_b->nres.valid_size = attr_b->nres.data_size;

                if (type == ATTR_LIST) {
                        err = ni_expand_list(ni);
                        if (err)
                                goto out;
                        if (next_svcn < vcn)
                                goto pack_runs;

                        /* Layout of records is changed. */
                        goto again;
                }

                if (!ni->attr_list.size) {
                        err = ni_create_attr_list(ni);
                        if (err)
                                goto out;
                        /* Layout of records is changed. */
                }

                if (next_svcn >= vcn) {
                        /* This is MFT data, repeat. */
                        goto again;
                }

                /* Insert new attribute segment. */
                err = ni_insert_nonresident(ni, type, name, name_len, run,
                                            next_svcn, vcn - next_svcn,
                                            attr_b->flags, &attr, &mi);
                if (err)
                        goto out;

                if (!is_mft)
                        run_truncate_head(run, evcn + 1);

                svcn = le64_to_cpu(attr->nres.svcn);
                evcn = le64_to_cpu(attr->nres.evcn);

                le_b = NULL;
                /*
                 * Layout of records may be changed.
                 * Find the base attribute to update.
                 */
                attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
                                      NULL, &mi_b);
                if (!attr_b) {
                        err = -ENOENT;
                        goto out;
                }

                attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
                attr_b->nres.data_size = attr_b->nres.alloc_size;
                attr_b->nres.valid_size = attr_b->nres.alloc_size;
                mi_b->dirty = true;
                goto again_1;
        }

        if (new_size != old_size ||
            (new_alloc != old_alloc && !keep_prealloc)) {
                vcn = max(svcn, new_alen);
                new_alloc_tmp = (u64)vcn << cluster_bits;

                alen = 0;
                err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
                                        true);
                if (err)
                        goto out;

                run_truncate(run, vcn);

                if (vcn > svcn) {
                        err = mi_pack_runs(mi, attr, run, vcn - svcn);
                        if (err)
                                goto out;
                } else if (le && le->vcn) {
                        u16 le_sz = le16_to_cpu(le->size);

                        /*
                         * NOTE: List entries for one attribute are always
                         * the same size. We are dealing with the last entry
                         * (vcn == 0), and it is not the first in the entries
                         * array (the list entry for the std attribute is
                         * always first), so it is safe to step back.
                         */
                        mi_remove_attr(NULL, mi, attr);

                        if (!al_remove_le(ni, le)) {
                                err = -EINVAL;
                                goto out;
                        }

                        le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
                } else {
                        attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
                        mi->dirty = true;
                }

                attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

                if (vcn == new_alen) {
                        attr_b->nres.data_size = cpu_to_le64(new_size);
                        if (new_size < old_valid)
                                attr_b->nres.valid_size =
                                        attr_b->nres.data_size;
                } else {
                        if (new_alloc_tmp <=
                            le64_to_cpu(attr_b->nres.data_size))
                                attr_b->nres.data_size =
                                        attr_b->nres.alloc_size;
                        if (new_alloc_tmp <
                            le64_to_cpu(attr_b->nres.valid_size))
                                attr_b->nres.valid_size =
                                        attr_b->nres.alloc_size;
                }

                if (is_ext)
                        le64_sub_cpu(&attr_b->nres.total_size,
                                     ((u64)alen << cluster_bits));

                mi_b->dirty = true;

                if (new_alloc_tmp <= new_alloc)
                        goto ok;

                old_size = new_alloc_tmp;
                vcn = svcn - 1;

                if (le == le_b) {
                        attr = attr_b;
                        mi = mi_b;
                        evcn = svcn - 1;
                        svcn = 0;
                        goto next_le;
                }

                if (le->type != type || le->name_len != name_len ||
                    memcmp(le_name(le), name, name_len * sizeof(short))) {
                        err = -EINVAL;
                        goto out;
                }

                err = ni_load_mi(ni, le, &mi);
                if (err)
                        goto out;

                attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
                if (!attr) {
                        err = -EINVAL;
                        goto out;
                }
                goto next_le_1;
        }

ok:
        if (new_valid) {
                __le64 valid = cpu_to_le64(min(*new_valid, new_size));

                if (attr_b->nres.valid_size != valid) {
                        attr_b->nres.valid_size = valid;
                        mi_b->dirty = true;
                }
        }

out:
        if (!err && attr_b && ret)
                *ret = attr_b;

        /* Update inode_set_bytes. */
        if (!err && ((type == ATTR_DATA && !name_len) ||
                     (type == ATTR_ALLOC && name == I30_NAME))) {
                bool dirty = false;

                if (ni->vfs_inode.i_size != new_size) {
                        ni->vfs_inode.i_size = new_size;
                        dirty = true;
                }

                if (attr_b && attr_b->non_res) {
                        new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
                        if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
                                inode_set_bytes(&ni->vfs_inode, new_alloc);
                                dirty = true;
                        }
                }

                if (dirty) {
                        ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
                        mark_inode_dirty(&ni->vfs_inode);
                }
        }

        return err;
}
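
/*
 * Usage sketch for attr_set_size() (hypothetical caller, modeled on the
 * truncate/extend paths elsewhere in the driver): extend the unnamed
 * $DATA attribute, keeping preallocation and capping the valid size:
 *
 *	u64 new_valid = ni->i_valid;
 *
 *	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
 *			    new_size, &new_valid, true, NULL);
 */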

int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
                        CLST *len, bool *new)
{
        int err = 0;
        struct runs_tree *run = &ni->file.run;
        struct ntfs_sb_info *sbi;
        u8 cluster_bits;
        struct ATTRIB *attr = NULL, *attr_b;
        struct ATTR_LIST_ENTRY *le, *le_b;
        struct mft_inode *mi, *mi_b;
        CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
        u64 total_size;
        u32 clst_per_frame;
        bool ok;

        if (new)
                *new = false;

        down_read(&ni->file.run_lock);
        ok = run_lookup_entry(run, vcn, lcn, len, NULL);
        up_read(&ni->file.run_lock);

        if (ok && (*lcn != SPARSE_LCN || !new)) {
                /* Normal way. */
                return 0;
        }

        if (!clen)
                clen = 1;

        if (ok && clen > *len)
                clen = *len;

        sbi = ni->mi.sbi;
        cluster_bits = sbi->cluster_bits;

        ni_lock(ni);
        down_write(&ni->file.run_lock);

        le_b = NULL;
        attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
        if (!attr_b) {
                err = -ENOENT;
                goto out;
        }

        if (!attr_b->non_res) {
                *lcn = RESIDENT_LCN;
                *len = 1;
                goto out;
        }

        asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
        if (vcn >= asize) {
                err = -EINVAL;
                goto out;
        }

        clst_per_frame = 1u << attr_b->nres.c_unit;
        to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);

        if (vcn + to_alloc > asize)
                to_alloc = asize - vcn;

        svcn = le64_to_cpu(attr_b->nres.svcn);
        evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

        attr = attr_b;
        le = le_b;
        mi = mi_b;

        if (le_b && (vcn < svcn || evcn1 <= vcn)) {
                attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
                                    &mi);
                if (!attr) {
                        err = -EINVAL;
                        goto out;
                }
                svcn = le64_to_cpu(attr->nres.svcn);
                evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
        }

        err = attr_load_runs(attr, ni, run, NULL);
        if (err)
                goto out;

        if (!ok) {
                ok = run_lookup_entry(run, vcn, lcn, len, NULL);
                if (ok && (*lcn != SPARSE_LCN || !new)) {
                        /* Normal way. */
                        err = 0;
                        goto ok;
                }

                if (!ok && !new) {
                        *len = 0;
                        err = 0;
                        goto ok;
                }

                if (ok && clen > *len) {
                        clen = *len;
                        to_alloc = (clen + clst_per_frame - 1) &
                                   ~(clst_per_frame - 1);
                }
        }

        if (!is_attr_ext(attr_b)) {
                err = -EINVAL;
                goto out;
        }

        /* Get the last LCN to allocate from. */
        hint = 0;

        if (vcn > evcn1) {
                if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
                                   false)) {
                        err = -ENOMEM;
                        goto out;
                }
        } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
                hint = -1;
        }

        err = attr_allocate_clusters(
                sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
                (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
                lcn);
        if (err)
                goto out;
        *new = true;

        end = vcn + *len;

        total_size = le64_to_cpu(attr_b->nres.total_size) +
                     ((u64)*len << cluster_bits);

repack:
        err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
        if (err)
                goto out;

        attr_b->nres.total_size = cpu_to_le64(total_size);
        inode_set_bytes(&ni->vfs_inode, total_size);
        ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

        mi_b->dirty = true;
        mark_inode_dirty(&ni->vfs_inode);

        /* Stored [vcn : next_svcn) from [vcn : end). */
        next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

        if (end <= evcn1) {
                if (next_svcn == evcn1) {
                        /* Normal way. Update attribute and exit. */
                        goto ok;
                }
                /* Add a new segment: [next_svcn, evcn1). */
                if (!ni->attr_list.size) {
                        err = ni_create_attr_list(ni);
                        if (err)
                                goto out;
                        /* Layout of records is changed. */
                        le_b = NULL;
                        attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
                                              0, NULL, &mi_b);
                        if (!attr_b) {
                                err = -ENOENT;
                                goto out;
                        }

                        attr = attr_b;
                        le = le_b;
                        mi = mi_b;
                        goto repack;
                }
        }

        svcn = evcn1;

        /* Estimate next attribute. */
        attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

        if (attr) {
                CLST alloc = bytes_to_cluster(
                        sbi, le64_to_cpu(attr_b->nres.alloc_size));
                CLST evcn = le64_to_cpu(attr->nres.evcn);

                if (end < next_svcn)
                        end = next_svcn;
                while (end > evcn) {
                        /* Remove segment [svcn : evcn). */
                        mi_remove_attr(NULL, mi, attr);

                        if (!al_remove_le(ni, le)) {
                                err = -EINVAL;
                                goto out;
                        }

                        if (evcn + 1 >= alloc) {
                                /* Last attribute segment. */
                                evcn1 = evcn + 1;
                                goto ins_ext;
                        }

                        if (ni_load_mi(ni, le, &mi)) {
                                attr = NULL;
                                goto out;
                        }

                        attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
                                            &le->id);
                        if (!attr) {
                                err = -EINVAL;
                                goto out;
                        }
                        svcn = le64_to_cpu(attr->nres.svcn);
                        evcn = le64_to_cpu(attr->nres.evcn);
                }

                if (end < svcn)
                        end = svcn;

                err = attr_load_runs(attr, ni, run, &end);
                if (err)
                        goto out;

                evcn1 = evcn + 1;
                attr->nres.svcn = cpu_to_le64(next_svcn);
                err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
                if (err)
                        goto out;

                le->vcn = cpu_to_le64(next_svcn);
                ni->attr_list.dirty = true;
                mi->dirty = true;

                next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
        }
ins_ext:
        if (evcn1 > next_svcn) {
                err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
                                            next_svcn, evcn1 - next_svcn,
                                            attr_b->flags, &attr, &mi);
                if (err)
                        goto out;
        }
ok:
        run_truncate_around(run, vcn);
out:
        up_write(&ni->file.run_lock);
        ni_unlock(ni);

        return err;
}
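
/*
 * Caller sketch for attr_data_get_block() (hypothetical, modeled on a
 * get_block-style helper): map one cluster for writing, allocating it
 * on demand; freshly allocated clusters must be zeroed by the caller:
 *
 *	CLST lcn, len;
 *	bool new = false;
 *
 *	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new);
 */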

int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
        u64 vbo;
        struct ATTRIB *attr;
        u32 data_size;

        attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
        if (!attr)
                return -EINVAL;

        if (attr->non_res)
                return E_NTFS_NONRESIDENT;

        vbo = page->index << PAGE_SHIFT;
        data_size = le32_to_cpu(attr->res.data_size);
        if (vbo < data_size) {
                const char *data = resident_data(attr);
                char *kaddr = kmap_atomic(page);
                u32 use = data_size - vbo;

                if (use > PAGE_SIZE)
                        use = PAGE_SIZE;

                memcpy(kaddr, data + vbo, use);
                memset(kaddr + use, 0, PAGE_SIZE - use);
                kunmap_atomic(kaddr);
                flush_dcache_page(page);
                SetPageUptodate(page);
        } else if (!PageUptodate(page)) {
                zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
        }

        return 0;
}

int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
        u64 vbo;
        struct mft_inode *mi;
        struct ATTRIB *attr;
        u32 data_size;

        attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
        if (!attr)
                return -EINVAL;

        if (attr->non_res) {
                /* Return special error code to check this case. */
                return E_NTFS_NONRESIDENT;
        }

        vbo = page->index << PAGE_SHIFT;
        data_size = le32_to_cpu(attr->res.data_size);
        if (vbo < data_size) {
                char *data = resident_data(attr);
                char *kaddr = kmap_atomic(page);
                u32 use = data_size - vbo;

                if (use > PAGE_SIZE)
                        use = PAGE_SIZE;
                memcpy(data + vbo, kaddr, use);
                kunmap_atomic(kaddr);
                mi->dirty = true;
        }
        ni->i_valid = data_size;

        return 0;
}

/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
                       const __le16 *name, u8 name_len, struct runs_tree *run,
                       CLST vcn)
{
        struct ATTRIB *attr;
        int err;
        CLST svcn, evcn;
        u16 ro;

        attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
        if (!attr) {
                /* Is record corrupted? */
                return -ENOENT;
        }

        svcn = le64_to_cpu(attr->nres.svcn);
        evcn = le64_to_cpu(attr->nres.evcn);

        if (evcn < vcn || vcn < svcn) {
                /* Is record corrupted? */
                return -EINVAL;
        }

        ro = le16_to_cpu(attr->nres.run_off);
        err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
                            Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
        if (err < 0)
                return err;
        return 0;
}

/*
 * attr_load_runs_range - Load runs for the given range [from, to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
                         const __le16 *name, u8 name_len, struct runs_tree *run,
                         u64 from, u64 to)
{
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        u8 cluster_bits = sbi->cluster_bits;
        CLST vcn = from >> cluster_bits;
        CLST vcn_last = (to - 1) >> cluster_bits;
        CLST lcn, clen;
        int err;

        for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
                if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
                        err = attr_load_runs_vcn(ni, type, name, name_len, run,
                                                 vcn);
                        if (err)
                                return err;
                        clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
                }
        }

        return 0;
}
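
/*
 * Example (illustrative, assuming 4K clusters): to make byte range
 * [0, 0x3000) of the WOF stream resolvable, the loop above walks VCNs
 * 0..2 and loads each unmapped one via attr_load_runs_vcn(); this is
 * how attr_wof_frame_info() below uses it:
 *
 *	err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
 *				   ARRAY_SIZE(WOF_NAME), run, 0, 0x3000);
 */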

#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read header of Xpress/LZX file to get info about frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
                        struct runs_tree *run, u64 frame, u64 frames,
                        u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        u64 vbo[2], off[2], wof_size;
        u32 voff;
        u8 bytes_per_off;
        char *addr;
        struct page *page;
        int i, err;
        __le32 *off32;
        __le64 *off64;

        if (ni->vfs_inode.i_size < 0x100000000ull) {
                /* File starts with array of 32 bit offsets. */
                bytes_per_off = sizeof(__le32);
                vbo[1] = frame << 2;
                *vbo_data = frames << 2;
        } else {
                /* File starts with array of 64 bit offsets. */
                bytes_per_off = sizeof(__le64);
                vbo[1] = frame << 3;
                *vbo_data = frames << 3;
        }

        /*
         * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
         * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
         */
        if (!attr->non_res) {
                if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
                        ntfs_inode_err(&ni->vfs_inode, "is corrupted");
                        return -EINVAL;
                }
                addr = resident_data(attr);

                if (bytes_per_off == sizeof(__le32)) {
                        off32 = Add2Ptr(addr, vbo[1]);
                        off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
                        off[1] = le32_to_cpu(off32[0]);
                } else {
                        off64 = Add2Ptr(addr, vbo[1]);
                        off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
                        off[1] = le64_to_cpu(off64[0]);
                }

                *vbo_data += off[0];
                *ondisk_size = off[1] - off[0];
                return 0;
        }

        wof_size = le64_to_cpu(attr->nres.data_size);
        down_write(&ni->file.run_lock);
        page = ni->file.offs_page;
        if (!page) {
                page = alloc_page(GFP_KERNEL);
                if (!page) {
                        err = -ENOMEM;
                        goto out;
                }
                page->index = -1;
                ni->file.offs_page = page;
        }
        lock_page(page);
        addr = page_address(page);

        if (vbo[1]) {
                voff = vbo[1] & (PAGE_SIZE - 1);
                vbo[0] = vbo[1] - bytes_per_off;
                i = 0;
        } else {
                voff = 0;
                vbo[0] = 0;
                off[0] = 0;
                i = 1;
        }

        do {
                pgoff_t index = vbo[i] >> PAGE_SHIFT;

                if (index != page->index) {
                        u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
                        u64 to = min(from + PAGE_SIZE, wof_size);

                        err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
                                                   ARRAY_SIZE(WOF_NAME), run,
                                                   from, to);
                        if (err)
                                goto out1;

                        err = ntfs_bio_pages(sbi, run, &page, 1, from,
                                             to - from, REQ_OP_READ);
                        if (err) {
                                page->index = -1;
                                goto out1;
                        }
                        page->index = index;
                }

                if (i) {
                        if (bytes_per_off == sizeof(__le32)) {
                                off32 = Add2Ptr(addr, voff);
                                off[1] = le32_to_cpu(*off32);
                        } else {
                                off64 = Add2Ptr(addr, voff);
                                off[1] = le64_to_cpu(*off64);
                        }
                } else if (!voff) {
                        if (bytes_per_off == sizeof(__le32)) {
                                off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
                                off[0] = le32_to_cpu(*off32);
                        } else {
                                off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
                                off[0] = le64_to_cpu(*off64);
                        }
                } else {
                        /* Two values in one page. */
                        if (bytes_per_off == sizeof(__le32)) {
                                off32 = Add2Ptr(addr, voff);
                                off[0] = le32_to_cpu(off32[-1]);
                                off[1] = le32_to_cpu(off32[0]);
                        } else {
                                off64 = Add2Ptr(addr, voff);
                                off[0] = le64_to_cpu(off64[-1]);
                                off[1] = le64_to_cpu(off64[0]);
                        }
                        break;
                }
        } while (++i < 2);

        *vbo_data += off[0];
        *ondisk_size = off[1] - off[0];

out1:
        unlock_page(page);
out:
        up_write(&ni->file.run_lock);
        return err;
}
#endif
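
/*
 * On-disk layout assumed by attr_wof_frame_info() above (sketch): the
 * WOF stream begins with an array of frame end offsets, 32-bit when
 * the file is smaller than 4 GiB and 64-bit otherwise.  Frame @frame
 * occupies bytes [off[frame - 1], off[frame]) counted from the end of
 * that array, with off[-1] taken as 0, hence:
 *
 *	*vbo_data    = bytes_per_off * frames + off[frame - 1];
 *	*ondisk_size = off[frame] - off[frame - 1];
 */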

/*
 * attr_is_frame_compressed - Used to detect compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
                             CLST frame, CLST *clst_data)
{
        int err;
        u32 clst_frame;
        CLST clen, lcn, vcn, alen, slen, vcn_next;
        size_t idx;
        struct runs_tree *run;

        *clst_data = 0;

        if (!is_attr_compressed(attr))
                return 0;

        if (!attr->non_res)
                return 0;

        clst_frame = 1u << attr->nres.c_unit;
        vcn = frame * clst_frame;
        run = &ni->file.run;

        if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
                err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
                                         attr->name_len, run, vcn);
                if (err)
                        return err;

                if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
                        return -EINVAL;
        }

        if (lcn == SPARSE_LCN) {
                /* Sparse frame. */
                return 0;
        }

        if (clen >= clst_frame) {
                /*
                 * The frame is not compressed because
                 * it does not contain any sparse clusters.
                 */
                *clst_data = clst_frame;
                return 0;
        }

        alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
        slen = 0;
        *clst_data = clen;

        /*
         * The frame is compressed if *clst_data + slen >= clst_frame.
         * Check next fragments.
         */
        while ((vcn += clen) < alen) {
                vcn_next = vcn;

                if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
                    vcn_next != vcn) {
                        err = attr_load_runs_vcn(ni, attr->type,
                                                 attr_name(attr),
                                                 attr->name_len, run, vcn_next);
                        if (err)
                                return err;
                        vcn = vcn_next;

                        if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
                                return -EINVAL;
                }

                if (lcn == SPARSE_LCN) {
                        slen += clen;
                } else {
                        if (slen) {
                                /*
                                 * Data clusters + sparse clusters do
                                 * not add up to a whole frame.
                                 */
                                return -EINVAL;
                        }
                        *clst_data += clen;
                }

                if (*clst_data + slen >= clst_frame) {
                        if (!slen) {
                                /*
                                 * There are no sparse clusters in this
                                 * frame, so it is not compressed.
                                 */
                                *clst_data = clst_frame;
                        } else {
                                /* Frame is compressed. */
                        }
                        break;
                }
        }

        return 0;
}
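
/*
 * Worked example (illustrative, 16-cluster frames): a frame mapped as
 * 10 data clusters followed by 6 sparse clusters is compressed, and
 * *clst_data is returned as 10.  A frame covered by 16 or more
 * contiguous non-sparse clusters is not compressed, and *clst_data is
 * set to the full frame size.
 */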
1443
1444/*
1445 * attr_allocate_frame - Allocate/free clusters for @frame.
1446 *
1447 * Assumed: down_write(&ni->file.run_lock);
1448 */
1449int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1450                        u64 new_valid)
1451{
1452        int err = 0;
1453        struct runs_tree *run = &ni->file.run;
1454        struct ntfs_sb_info *sbi = ni->mi.sbi;
1455        struct ATTRIB *attr = NULL, *attr_b;
1456        struct ATTR_LIST_ENTRY *le, *le_b;
1457        struct mft_inode *mi, *mi_b;
1458        CLST svcn, evcn1, next_svcn, lcn, len;
1459        CLST vcn, end, clst_data;
1460        u64 total_size, valid_size, data_size;
1461
1462        le_b = NULL;
1463        attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1464        if (!attr_b)
1465                return -ENOENT;
1466
1467        if (!is_attr_ext(attr_b))
1468                return -EINVAL;
1469
1470        vcn = frame << NTFS_LZNT_CUNIT;
1471        total_size = le64_to_cpu(attr_b->nres.total_size);
1472
1473        svcn = le64_to_cpu(attr_b->nres.svcn);
1474        evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1475        data_size = le64_to_cpu(attr_b->nres.data_size);
1476
1477        if (svcn <= vcn && vcn < evcn1) {
1478                attr = attr_b;
1479                le = le_b;
1480                mi = mi_b;
1481        } else if (!le_b) {
1482                err = -EINVAL;
1483                goto out;
1484        } else {
1485                le = le_b;
1486                attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1487                                    &mi);
1488                if (!attr) {
1489                        err = -EINVAL;
1490                        goto out;
1491                }
1492                svcn = le64_to_cpu(attr->nres.svcn);
1493                evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1494        }
1495
1496        err = attr_load_runs(attr, ni, run, NULL);
1497        if (err)
1498                goto out;
1499
1500        err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1501        if (err)
1502                goto out;
1503
1504        total_size -= (u64)clst_data << sbi->cluster_bits;
1505
1506        len = bytes_to_cluster(sbi, compr_size);
1507
1508        if (len == clst_data)
1509                goto out;
1510
1511        if (len < clst_data) {
1512                err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1513                                        NULL, true);
1514                if (err)
1515                        goto out;
1516
1517                if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1518                                   false)) {
1519                        err = -ENOMEM;
1520                        goto out;
1521                }
1522                end = vcn + clst_data;
1523                /* Run contains updated range [vcn + len : end). */
1524        } else {
1525                CLST alen, hint = 0;
1526                /* Get the last LCN to allocate from. */
1527                if (vcn + clst_data &&
1528                    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1529                                      NULL)) {
1530                        hint = -1;
1531                }
1532
1533                err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1534                                             hint + 1, len - clst_data, NULL, 0,
1535                                             &alen, 0, &lcn);
1536                if (err)
1537                        goto out;
1538
1539                end = vcn + len;
1540                /* Run contains updated range [vcn + clst_data : end). */
1541        }
1542
1543        total_size += (u64)len << sbi->cluster_bits;
1544
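            /*
             * mi_pack_runs() stores as many runs of [svcn : max(end, evcn1))
             * as fit in this MFT record and sets attr->nres.evcn to the last
             * VCN actually stored; any leftover range is dealt with below.
             */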
1545repack:
1546        err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1547        if (err)
1548                goto out;
1549
1550        attr_b->nres.total_size = cpu_to_le64(total_size);
1551        inode_set_bytes(&ni->vfs_inode, total_size);
1552
1553        mi_b->dirty = true;
1554        mark_inode_dirty(&ni->vfs_inode);
1555
1556        /* Stored [vcn : next_svcn) from [vcn : end). */
1557        next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1558
1559        if (end <= evcn1) {
1560                if (next_svcn == evcn1) {
1561                        /* Normal case: everything fit; update attribute and exit. */
1562                        goto ok;
1563                }
1564                /* Add new segment [next_svcn : evcn1). */
1565                if (!ni->attr_list.size) {
1566                        err = ni_create_attr_list(ni);
1567                        if (err)
1568                                goto out;
1569                        /* Layout of records has changed. */
1570                        le_b = NULL;
1571                        attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1572                                              0, NULL, &mi_b);
1573                        if (!attr_b) {
1574                                err = -ENOENT;
1575                                goto out;
1576                        }
1577
1578                        attr = attr_b;
1579                        le = le_b;
1580                        mi = mi_b;
1581                        goto repack;
1582                }
1583        }
1584
1585        svcn = evcn1;
1586
1587        /* Check the next attribute segment. */
1588        attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1589
1590        if (attr) {
1591                CLST alloc = bytes_to_cluster(
1592                        sbi, le64_to_cpu(attr_b->nres.alloc_size));
1593                CLST evcn = le64_to_cpu(attr->nres.evcn);
1594
1595                if (end < next_svcn)
1596                        end = next_svcn;
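                    /* Drop segments now fully covered by the runs stored above. */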
1597                while (end > evcn) {
1598                        /* Remove segment [svcn : evcn + 1). */
1599                        mi_remove_attr(NULL, mi, attr);
1600
1601                        if (!al_remove_le(ni, le)) {
1602                                err = -EINVAL;
1603                                goto out;
1604                        }
1605
1606                        if (evcn + 1 >= alloc) {
1607                                /* Last attribute segment. */
1608                                evcn1 = evcn + 1;
1609                                goto ins_ext;
1610                        }
1611
1612                        if (ni_load_mi(ni, le, &mi)) {
1613                                err = -EINVAL;
1614                                goto out;
1615                        }
1616
1617                        attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1618                                            &le->id);
1619                        if (!attr) {
1620                                err = -EINVAL;
1621                                goto out;
1622                        }
1623                        svcn = le64_to_cpu(attr->nres.svcn);
1624                        evcn = le64_to_cpu(attr->nres.evcn);
1625                }
1626
1627                if (end < svcn)
1628                        end = svcn;
1629
1630                err = attr_load_runs(attr, ni, run, &end);
1631                if (err)
1632                        goto out;
1633
1634                evcn1 = evcn + 1;
1635                attr->nres.svcn = cpu_to_le64(next_svcn);
1636                err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1637                if (err)
1638                        goto out;
1639
1640                le->vcn = cpu_to_le64(next_svcn);
1641                ni->attr_list.dirty = true;
1642                mi->dirty = true;
1643
1644                next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1645        }
1646ins_ext:
1647        if (evcn1 > next_svcn) {
1648                err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1649                                            next_svcn, evcn1 - next_svcn,
1650                                            attr_b->flags, &attr, &mi);
1651                if (err)
1652                        goto out;
1653        }
1654ok:
1655        run_truncate_around(run, vcn);
1656out:
1657        if (new_valid > data_size)
1658                new_valid = data_size;
1659
1660        valid_size = le64_to_cpu(attr_b->nres.valid_size);
1661        if (new_valid != valid_size) {
1662                attr_b->nres.valid_size = cpu_to_le64(new_valid);
1663                mi_b->dirty = true;
1664        }
1665
1666        return err;
1667}
1668
1669/*
1670 * attr_collapse_range - Collapse the range [vbo, vbo + bytes) in a file.
1671 */
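    /*
     * Example (a sketch, assuming 4K clusters): collapsing vbo = 64K,
     * bytes = 32K frees clusters [16 : 24) and shifts every later VCN
     * down by 8; data_size and alloc_size both shrink by 32K.
     */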
1672int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1673{
1674        int err = 0;
1675        struct runs_tree *run = &ni->file.run;
1676        struct ntfs_sb_info *sbi = ni->mi.sbi;
1677        struct ATTRIB *attr = NULL, *attr_b;
1678        struct ATTR_LIST_ENTRY *le, *le_b;
1679        struct mft_inode *mi, *mi_b;
1680        CLST svcn, evcn1, len, dealloc, alen;
1681        CLST vcn, end;
1682        u64 valid_size, data_size, alloc_size, total_size;
1683        u32 mask;
1684        __le16 a_flags;
1685
1686        if (!bytes)
1687                return 0;
1688
1689        le_b = NULL;
1690        attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1691        if (!attr_b)
1692                return -ENOENT;
1693
1694        if (!attr_b->non_res) {
1695                /* Attribute is resident. Nothing to do. */
1696                return 0;
1697        }
1698
1699        data_size = le64_to_cpu(attr_b->nres.data_size);
1700        alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1701        a_flags = attr_b->flags;
1702
1703        if (is_attr_ext(attr_b)) {
1704                total_size = le64_to_cpu(attr_b->nres.total_size);
1705                mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1706        } else {
1707                total_size = alloc_size;
1708                mask = sbi->cluster_mask;
1709        }
1710
1711        if ((vbo & mask) || (bytes & mask)) {
1712                /* Only allocation-unit-aligned ranges can be collapsed. */
1713                return -EINVAL;
1714        }
1715
1716        if (vbo > data_size)
1717                return -EINVAL;
1718
1719        down_write(&ni->file.run_lock);
1720
1721        if (vbo + bytes >= data_size) {
1722                u64 new_valid = min(ni->i_valid, vbo);
1723
1724                /* Simple truncate file at 'vbo'. */
1725                truncate_setsize(&ni->vfs_inode, vbo);
1726                err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1727                                    &new_valid, true, NULL);
1728
1729                if (!err && new_valid < ni->i_valid)
1730                        ni->i_valid = new_valid;
1731
1732                goto out;
1733        }
1734
1735        /*
1736         * Enumerate all attribute segments and collapse.
1737         */
1738        alen = alloc_size >> sbi->cluster_bits;
1739        vcn = vbo >> sbi->cluster_bits;
1740        len = bytes >> sbi->cluster_bits;
1741        end = vcn + len;
1742        dealloc = 0;
1743
1744        svcn = le64_to_cpu(attr_b->nres.svcn);
1745        evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1746
1747        if (svcn <= vcn && vcn < evcn1) {
1748                attr = attr_b;
1749                le = le_b;
1750                mi = mi_b;
1751        } else if (!le_b) {
1752                err = -EINVAL;
1753                goto out;
1754        } else {
1755                le = le_b;
1756                attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1757                                    &mi);
1758                if (!attr) {
1759                        err = -EINVAL;
1760                        goto out;
1761                }
1762
1763                svcn = le64_to_cpu(attr->nres.svcn);
1764                evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1765        }
1766
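            /*
             * Each segment falls into one of three cases: entirely after the
             * collapsed range (just shift its VCNs), partially overlapping it
             * (deallocate and collapse the overlap), or entirely inside it
             * (deallocate the runs and delete the segment).
             */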
1767        for (;;) {
1768                if (svcn >= end) {
1769                        /* Segment lies after the removed range: shift VCNs down. */
1770                        attr->nres.svcn = cpu_to_le64(svcn - len);
1771                        attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1772                        if (le) {
1773                                le->vcn = attr->nres.svcn;
1774                                ni->attr_list.dirty = true;
1775                        }
1776                        mi->dirty = true;
1777                } else if (svcn < vcn || end < evcn1) {
1778                        CLST vcn1, eat, next_svcn;
1779
1780                        /* Collapse a part of this attribute segment. */
1781                        err = attr_load_runs(attr, ni, run, &svcn);
1782                        if (err)
1783                                goto out;
1784                        vcn1 = max(vcn, svcn);
1785                        eat = min(end, evcn1) - vcn1;
1786
1787                        err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1788                                                true);
1789                        if (err)
1790                                goto out;
1791
1792                        if (!run_collapse_range(run, vcn1, eat)) {
1793                                err = -ENOMEM;
1794                                goto out;
1795                        }
1796
1797                        if (svcn >= vcn) {
1798                                /* The segment now starts at vcn. */
1799                                attr->nres.svcn = cpu_to_le64(vcn);
1800                                if (le) {
1801                                        le->vcn = attr->nres.svcn;
1802                                        ni->attr_list.dirty = true;
1803                                }
1804                        }
1805
1806                        err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1807                        if (err)
1808                                goto out;
1809
1810                        next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1811                        if (next_svcn + eat < evcn1) {
1812                                err = ni_insert_nonresident(
1813                                        ni, ATTR_DATA, NULL, 0, run, next_svcn,
1814                                        evcn1 - eat - next_svcn, a_flags, &attr,
1815                                        &mi);
1816                                if (err)
1817                                        goto out;
1818
1819                                /* Layout of records may have changed. */
1820                                attr_b = NULL;
1821                                le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
1822                                                &next_svcn);
1823                                if (!le) {
1824                                        err = -EINVAL;
1825                                        goto out;
1826                                }
1827                        }
1828
1829                        /* Free all allocated memory. */
1830                        run_truncate(run, 0);
1831                } else {
1832                        u16 le_sz;
1833                        u16 roff = le16_to_cpu(attr->nres.run_off);
1834
1835                        run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
1836                                      evcn1 - 1, svcn, Add2Ptr(attr, roff),
1837                                      le32_to_cpu(attr->size) - roff);
1838
1839                        /* Delete this attribute segment. */
1840                        mi_remove_attr(NULL, mi, attr);
1841                        if (!le)
1842                                break;
1843
1844                        le_sz = le16_to_cpu(le->size);
1845                        if (!al_remove_le(ni, le)) {
1846                                err = -EINVAL;
1847                                goto out;
1848                        }
1849
1850                        if (evcn1 >= alen)
1851                                break;
1852
1853                        if (!svcn) {
1854                                /* Load the next record containing this attribute. */
1855                                if (ni_load_mi(ni, le, &mi)) {
1856                                        err = -EINVAL;
1857                                        goto out;
1858                                }
1859
1860                                /* Look for the required attribute. */
1861                                attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
1862                                                    0, &le->id);
1863                                if (!attr) {
1864                                        err = -EINVAL;
1865                                        goto out;
1866                                }
1867                                goto next_attr;
1868                        }
1869                        le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
1870                }
1871
1872                if (evcn1 >= alen)
1873                        break;
1874
1875                attr = ni_enum_attr_ex(ni, attr, &le, &mi);
1876                if (!attr) {
1877                        err = -EINVAL;
1878                        goto out;
1879                }
1880
1881next_attr:
1882                svcn = le64_to_cpu(attr->nres.svcn);
1883                evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1884        }
1885
1886        if (!attr_b) {
1887                le_b = NULL;
1888                attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
1889                                      &mi_b);
1890                if (!attr_b) {
1891                        err = -ENOENT;
1892                        goto out;
1893                }
1894        }
1895
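            /*
             * valid_size shrinks by the collapsed bytes when the range lay
             * wholly below it, or is clamped to vbo when the range
             * straddled it.
             */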
1896        data_size -= bytes;
1897        valid_size = ni->i_valid;
1898        if (vbo + bytes <= valid_size)
1899                valid_size -= bytes;
1900        else if (vbo < valid_size)
1901                valid_size = vbo;
1902
1903        attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
1904        attr_b->nres.data_size = cpu_to_le64(data_size);
1905        attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
1906        total_size -= (u64)dealloc << sbi->cluster_bits;
1907        if (is_attr_ext(attr_b))
1908                attr_b->nres.total_size = cpu_to_le64(total_size);
1909        mi_b->dirty = true;
1910
1911        /* Update inode size. */
1912        ni->i_valid = valid_size;
1913        ni->vfs_inode.i_size = data_size;
1914        inode_set_bytes(&ni->vfs_inode, total_size);
1915        ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1916        mark_inode_dirty(&ni->vfs_inode);
1917
1918out:
1919        up_write(&ni->file.run_lock);
1920        if (err)
1921                make_bad_inode(&ni->vfs_inode);
1922
1923        return err;
1924}
1925
1926/*
1927 * attr_punch_hole
1928 *
1929 * Not for normal files: only sparse or compressed attributes are supported.
1930 */
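    /*
     * Example (a sketch, assuming 4K clusters and a 16-cluster
     * compression unit): punching vbo = 128K, bytes = 64K deallocates
     * clusters [32 : 48) and replaces them with a sparse run; only
     * total_size (the really allocated bytes) shrinks, data_size and
     * alloc_size are unchanged.
     */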
1931int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
1932{
1933        int err = 0;
1934        struct runs_tree *run = &ni->file.run;
1935        struct ntfs_sb_info *sbi = ni->mi.sbi;
1936        struct ATTRIB *attr = NULL, *attr_b;
1937        struct ATTR_LIST_ENTRY *le, *le_b;
1938        struct mft_inode *mi, *mi_b;
1939        CLST svcn, evcn1, vcn, len, end, alen, dealloc;
1940        u64 total_size, alloc_size;
1941        u32 mask;
1942
1943        if (!bytes)
1944                return 0;
1945
1946        le_b = NULL;
1947        attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1948        if (!attr_b)
1949                return -ENOENT;
1950
1951        if (!attr_b->non_res) {
1952                u32 data_size = le32_to_cpu(attr_b->res.data_size);
1953                u32 from, to;
1954
1955                if (vbo > data_size)
1956                        return 0;
1957
1958                from = vbo;
1959                to = min_t(u64, vbo + bytes, data_size);
1960                memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
1961                return 0;
1962        }
1963
1964        if (!is_attr_ext(attr_b))
1965                return -EOPNOTSUPP;
1966
1967        alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1968        total_size = le64_to_cpu(attr_b->nres.total_size);
1969
1970        if (vbo >= alloc_size) {
1971                /* NOTE: Punching beyond the allocation is allowed (no-op). */
1972                return 0;
1973        }
1974
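            /*
             * mask covers one compression unit (cluster_size << c_unit
             * bytes); holes can only be punched in whole, unit-aligned
             * ranges.
             */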
1975        mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1976
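            /* Clamp the hole so it does not extend past the allocation. */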
1977        bytes += vbo;
1978        if (bytes > alloc_size)
1979                bytes = alloc_size;
1980        bytes -= vbo;
1981
1982        if ((vbo & mask) || (bytes & mask)) {
1983                /* The range is not unit-aligned: some bytes must be zeroed. */
1984                if (!frame_size) {
1985                        /* Caller insists range is aligned. */
1986                        return -EINVAL;
1987                }
1988                *frame_size = mask + 1;
1989                return E_NTFS_NOTALIGNED;
1990        }
1991
1992        down_write(&ni->file.run_lock);
1993        /*
1994         * Enumerate all attribute segments and punch hole where necessary.
1995         */
1996        alen = alloc_size >> sbi->cluster_bits;
1997        vcn = vbo >> sbi->cluster_bits;
1998        len = bytes >> sbi->cluster_bits;
1999        end = vcn + len;
2000        dealloc = 0;
2001
2002        svcn = le64_to_cpu(attr_b->nres.svcn);
2003        evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2004
2005        if (svcn <= vcn && vcn < evcn1) {
2006                attr = attr_b;
2007                le = le_b;
2008                mi = mi_b;
2009        } else if (!le_b) {
2010                err = -EINVAL;
2011                goto out;
2012        } else {
2013                le = le_b;
2014                attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2015                                    &mi);
2016                if (!attr) {
2017                        err = -EINVAL;
2018                        goto out;
2019                }
2020
2021                svcn = le64_to_cpu(attr->nres.svcn);
2022                evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2023        }
2024
2025        while (svcn < end) {
2026                CLST vcn1, zero, dealloc2;
2027
2028                err = attr_load_runs(attr, ni, run, &svcn);
2029                if (err)
2030                        goto out;
2031                vcn1 = max(vcn, svcn);
2032                zero = min(end, evcn1) - vcn1;
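                    /* [vcn1 : vcn1 + zero) is the hole's slice within this segment. */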
2033
2034                dealloc2 = dealloc;
2035                err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
2036                if (err)
2037                        goto out;
2038
2039                if (dealloc2 == dealloc) {
2040                        /* Looks like the required range is already sparse. */
2041                } else {
2042                        if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
2043                                           false)) {
2044                                err = -ENOMEM;
2045                                goto out;
2046                        }
2047
2048                        err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2049                        if (err)
2050                                goto out;
2051                }
2052                /* Free all allocated memory. */
2053                run_truncate(run, 0);
2054
2055                if (evcn1 >= alen)
2056                        break;
2057
2058                attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2059                if (!attr) {
2060                        err = -EINVAL;
2061                        goto out;
2062                }
2063
2064                svcn = le64_to_cpu(attr->nres.svcn);
2065                evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2066        }
2067
2068        total_size -= (u64)dealloc << sbi->cluster_bits;
2069        attr_b->nres.total_size = cpu_to_le64(total_size);
2070        mi_b->dirty = true;
2071
2072        /* Update inode size. */
2073        inode_set_bytes(&ni->vfs_inode, total_size);
2074        ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2075        mark_inode_dirty(&ni->vfs_inode);
2076
2077out:
2078        up_write(&ni->file.run_lock);
2079        if (err)
2080                make_bad_inode(&ni->vfs_inode);
2081
2082        return err;
2083}
2084