linux/fs/erofs/zmap.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

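/*
 * Set up the compressed address space operations; for legacy (non-big-pcluster)
 * compressed inodes, the per-inode compression fields can be filled with fixed
 * defaults right away instead of being parsed lazily from the map header.
 */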
int z_erofs_fill_inode(struct inode *inode)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

        if (!erofs_sb_has_big_pcluster(sbi) &&
            vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
                vi->z_advise = 0;
                vi->z_algorithmtype[0] = 0;
                vi->z_algorithmtype[1] = 0;
                vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
                set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
        }
        inode->i_mapping->a_ops = &z_erofs_aops;
        return 0;
}

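/*
 * Parse the on-disk z_erofs_map_header on first use and cache the result in
 * the in-memory inode (z_advise, algorithm types, logical cluster bits).
 * EROFS_I_BL_Z_BIT serializes concurrent initializers, and the smp_mb() pair
 * ensures the fields are visible before EROFS_I_Z_INITED_BIT is observed set.
 */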
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct super_block *const sb = inode->i_sb;
        int err;
        erofs_off_t pos;
        struct page *page;
        void *kaddr;
        struct z_erofs_map_header *h;

        if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
                /*
                 * paired with smp_mb() at the end of the function to ensure
                 * fields will only be observed after the bit is set.
                 */
                smp_mb();
                return 0;
        }

        if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
                return -ERESTARTSYS;

        err = 0;
        if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
                goto out_unlock;

        DBG_BUGON(!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
                  vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);

        pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
                    vi->xattr_isize, 8);
        page = erofs_get_meta_page(sb, erofs_blknr(pos));
        if (IS_ERR(page)) {
                err = PTR_ERR(page);
                goto out_unlock;
        }

        kaddr = kmap_atomic(page);

        h = kaddr + erofs_blkoff(pos);
        vi->z_advise = le16_to_cpu(h->h_advise);
        vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
        vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

        if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
                erofs_err(sb, "unknown compression format %u for nid %llu, please upgrade kernel",
                          vi->z_algorithmtype[0], vi->nid);
                err = -EOPNOTSUPP;
                goto unmap_done;
        }

        vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
        if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
            vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
                            Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
                erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
                          vi->nid);
                err = -EFSCORRUPTED;
                goto unmap_done;
        }
        if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
                erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
                          vi->nid);
                err = -EFSCORRUPTED;
                goto unmap_done;
        }
        /* paired with smp_mb() at the beginning of the function */
        smp_mb();
        set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
unmap_done:
        kunmap_atomic(kaddr);
        unlock_page(page);
        put_page(page);
out_unlock:
        clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
        return err;
}

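/* per-lookup state shared by the lcluster index walkers below */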
struct z_erofs_maprecorder {
        struct inode *inode;
        struct erofs_map_blocks *map;
        void *kaddr;

        unsigned long lcn;
        /* compression extent information gathered */
        u8  type;
        u16 clusterofs;
        u16 delta[2];
        erofs_blk_t pblk, compressedlcs;
};

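/*
 * Make sure the metadata block @eblk is mapped at m->kaddr, reusing the
 * currently cached map->mpage when possible and reading a new meta page
 * otherwise.
 */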
static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
                                  erofs_blk_t eblk)
{
        struct super_block *const sb = m->inode->i_sb;
        struct erofs_map_blocks *const map = m->map;
        struct page *mpage = map->mpage;

        if (mpage) {
                if (mpage->index == eblk) {
                        if (!m->kaddr)
                                m->kaddr = kmap_atomic(mpage);
                        return 0;
                }

                if (m->kaddr) {
                        kunmap_atomic(m->kaddr);
                        m->kaddr = NULL;
                }
                put_page(mpage);
        }

        mpage = erofs_get_meta_page(sb, eblk);
        if (IS_ERR(mpage)) {
                map->mpage = NULL;
                return PTR_ERR(mpage);
        }
        m->kaddr = kmap_atomic(mpage);
        unlock_page(mpage);
        map->mpage = mpage;
        return 0;
}

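/*
 * Decode one fixed-size (legacy) lcluster index for @lcn into the recorder:
 * cluster type, cluster offset, the lookback/lookahead deltas and the
 * physical block address.
 */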
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
                                         unsigned long lcn)
{
        struct inode *const inode = m->inode;
        struct erofs_inode *const vi = EROFS_I(inode);
        const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
        const erofs_off_t pos =
                Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
                                               vi->xattr_isize) +
                lcn * sizeof(struct z_erofs_vle_decompressed_index);
        struct z_erofs_vle_decompressed_index *di;
        unsigned int advise, type;
        int err;

        err = z_erofs_reload_indexes(m, erofs_blknr(pos));
        if (err)
                return err;

        m->lcn = lcn;
        di = m->kaddr + erofs_blkoff(pos);

        advise = le16_to_cpu(di->di_advise);
        type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
                ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
        switch (type) {
        case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
                m->clusterofs = 1 << vi->z_logical_clusterbits;
                m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
                if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
                        if (!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
                                DBG_BUGON(1);
                                return -EFSCORRUPTED;
                        }
                        m->compressedlcs = m->delta[0] &
                                ~Z_EROFS_VLE_DI_D0_CBLKCNT;
                        m->delta[0] = 1;
                }
                m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
                break;
        case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
        case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
                m->clusterofs = le16_to_cpu(di->di_clusterofs);
                m->pblk = le32_to_cpu(di->di_u.blkaddr);
                break;
        default:
                DBG_BUGON(1);
                return -EOPNOTSUPP;
        }
        m->type = type;
        return 0;
}

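/*
 * Extract one encoded lcluster from the compacted bitstream at bit position
 * @pos: the low @lobits bits (returned) plus the 2-bit cluster type at *@type.
 */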
static unsigned int decode_compactedbits(unsigned int lobits,
                                         unsigned int lomask,
                                         u8 *in, unsigned int pos, u8 *type)
{
        const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
        const unsigned int lo = v & lomask;

        *type = (v >> lobits) & 3;
        return lo;
}

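/*
 * Walk forward from item @i within a compacted pack to work out the
 * lookahead distance (delta[1]) to the next HEAD lcluster.
 */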
static int get_compacted_la_distance(unsigned int lclusterbits,
                                     unsigned int encodebits,
                                     unsigned int vcnt, u8 *in, int i)
{
        const unsigned int lomask = (1 << lclusterbits) - 1;
        unsigned int lo, d1 = 0;
        u8 type;

        DBG_BUGON(i >= vcnt);

        do {
                lo = decode_compactedbits(lclusterbits, lomask,
                                          in, encodebits * i, &type);

                if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
                        return d1;
                ++d1;
        } while (++i < vcnt);

        /* the last lcluster (vcnt - 1) in the pack is NONHEAD; lo holds delta[1] */
        if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
                d1 += lo - 1;
        return d1;
}

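/*
 * Decode the compacted index pack that contains the lcluster at byte offset
 * @eofs and fill the recorder, deriving the physical block address of HEAD
 * lclusters by scanning backwards through the pack.
 */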
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
                                  unsigned int amortizedshift,
                                  unsigned int eofs, bool lookahead)
{
        struct erofs_inode *const vi = EROFS_I(m->inode);
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        const unsigned int lomask = (1 << lclusterbits) - 1;
        unsigned int vcnt, base, lo, encodebits, nblk;
        int i;
        u8 *in, type;
        bool big_pcluster;

        if (1 << amortizedshift == 4)
                vcnt = 2;
        else if (1 << amortizedshift == 2 && lclusterbits == 12)
                vcnt = 16;
        else
                return -EOPNOTSUPP;

        big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
        encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
        base = round_down(eofs, vcnt << amortizedshift);
        in = m->kaddr + base;

        i = (eofs - base) >> amortizedshift;

        lo = decode_compactedbits(lclusterbits, lomask,
                                  in, encodebits * i, &type);
        m->type = type;
        if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
                m->clusterofs = 1 << lclusterbits;

                /* figure out lookahead_distance: delta[1] if needed */
                if (lookahead)
                        m->delta[1] = get_compacted_la_distance(lclusterbits,
                                                encodebits, vcnt, in, i);
                if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
                        if (!big_pcluster) {
                                DBG_BUGON(1);
                                return -EFSCORRUPTED;
                        }
                        m->compressedlcs = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
                        m->delta[0] = 1;
                        return 0;
                } else if (i + 1 != (int)vcnt) {
                        m->delta[0] = lo;
                        return 0;
                }
                /*
                 * The last lcluster in the pack is special: its lo field
                 * stores delta[1] rather than delta[0], so derive delta[0]
                 * from the previous lcluster indirectly.
                 */
                lo = decode_compactedbits(lclusterbits, lomask,
                                          in, encodebits * (i - 1), &type);
                if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
                        lo = 0;
                else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
                        lo = 1;
                m->delta[0] = lo + 1;
                return 0;
        }
        m->clusterofs = lo;
        m->delta[0] = 0;
        /* figure out blkaddr (pblk) for HEAD lclusters */
        if (!big_pcluster) {
                nblk = 1;
                while (i > 0) {
                        --i;
                        lo = decode_compactedbits(lclusterbits, lomask,
                                                  in, encodebits * i, &type);
                        if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
                                i -= lo;

                        if (i >= 0)
                                ++nblk;
                }
        } else {
                nblk = 0;
                while (i > 0) {
                        --i;
                        lo = decode_compactedbits(lclusterbits, lomask,
                                                  in, encodebits * i, &type);
                        if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
                                if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
                                        --i;
                                        nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
                                        continue;
                                }
                                /* bigpcluster shouldn't have plain d0 == 1 */
                                if (lo <= 1) {
                                        DBG_BUGON(1);
                                        return -EFSCORRUPTED;
                                }
                                i -= lo - 2;
                                continue;
                        }
                        ++nblk;
                }
        }
        in += (vcnt << amortizedshift) - sizeof(__le32);
        m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
        return 0;
}

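/*
 * Locate the compacted (2B/4B-amortized) index that describes @lcn, load the
 * metadata block containing it and hand it over to unpack_compacted_index().
 */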
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
                                            unsigned long lcn, bool lookahead)
{
        struct inode *const inode = m->inode;
        struct erofs_inode *const vi = EROFS_I(inode);
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
                                        vi->inode_isize + vi->xattr_isize, 8) +
                sizeof(struct z_erofs_map_header);
        const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
        unsigned int compacted_4b_initial, compacted_2b;
        unsigned int amortizedshift;
        erofs_off_t pos;
        int err;

        if (lclusterbits != 12)
                return -EOPNOTSUPP;

        if (lcn >= totalidx)
                return -EINVAL;

        m->lcn = lcn;
        /* used to reach the 32-byte (compacted_2b) alignment boundary */
        compacted_4b_initial = (32 - ebase % 32) / 4;
        if (compacted_4b_initial == 32 / 4)
                compacted_4b_initial = 0;

        if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
            compacted_4b_initial < totalidx)
                compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
        else
                compacted_2b = 0;

        pos = ebase;
        if (lcn < compacted_4b_initial) {
                amortizedshift = 2;
                goto out;
        }
        pos += compacted_4b_initial * 4;
        lcn -= compacted_4b_initial;

        if (lcn < compacted_2b) {
                amortizedshift = 1;
                goto out;
        }
        pos += compacted_2b * 2;
        lcn -= compacted_2b;
        amortizedshift = 2;
out:
        pos += lcn * (1 << amortizedshift);
        err = z_erofs_reload_indexes(m, erofs_blknr(pos));
        if (err)
                return err;
        return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos),
                                      lookahead);
}

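/* dispatch to the legacy or compacted index decoder based on the datalayout */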
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
                                          unsigned int lcn, bool lookahead)
{
        const unsigned int datamode = EROFS_I(m->inode)->datalayout;

        if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
                return legacy_load_cluster_from_disk(m, lcn);

        if (datamode == EROFS_INODE_FLAT_COMPRESSION)
                return compacted_load_cluster_from_disk(m, lcn, lookahead);

        return -EINVAL;
}

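/*
 * Step back @lookback_distance lclusters (recursively for chained NONHEAD
 * lclusters) to find the HEAD lcluster of the current extent and set m_la.
 */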
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
                                   unsigned int lookback_distance)
{
        struct erofs_inode *const vi = EROFS_I(m->inode);
        struct erofs_map_blocks *const map = m->map;
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        unsigned long lcn = m->lcn;
        int err;

        if (lcn < lookback_distance) {
                erofs_err(m->inode->i_sb,
                          "bogus lookback distance @ nid %llu", vi->nid);
                DBG_BUGON(1);
                return -EFSCORRUPTED;
        }

        /* load extent head logical cluster if needed */
        lcn -= lookback_distance;
        err = z_erofs_load_cluster_from_disk(m, lcn, false);
        if (err)
                return err;

        switch (m->type) {
        case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
                if (!m->delta[0]) {
                        erofs_err(m->inode->i_sb,
                                  "invalid lookback distance 0 @ nid %llu",
                                  vi->nid);
                        DBG_BUGON(1);
                        return -EFSCORRUPTED;
                }
                return z_erofs_extent_lookback(m, m->delta[0]);
        case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
                map->m_flags &= ~EROFS_MAP_ZIPPED;
                fallthrough;
        case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
                map->m_la = (lcn << lclusterbits) | m->clusterofs;
                break;
        default:
                erofs_err(m->inode->i_sb,
                          "unknown type %u @ lcn %lu of nid %llu",
                          m->type, lcn, vi->nid);
                DBG_BUGON(1);
                return -EOPNOTSUPP;
        }
        return 0;
}

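/*
 * Work out the compressed length (m_plen) of the current pcluster: one
 * lcluster unless the big pcluster feature records a CBLKCNT in the
 * following NONHEAD lcluster.
 */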
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
                                            unsigned int initial_lcn)
{
        struct erofs_inode *const vi = EROFS_I(m->inode);
        struct erofs_map_blocks *const map = m->map;
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        unsigned long lcn;
        int err;

        DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
                  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD);
        if (!(map->m_flags & EROFS_MAP_ZIPPED) ||
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
                map->m_plen = 1 << lclusterbits;
                return 0;
        }

        lcn = m->lcn + 1;
        if (m->compressedlcs)
                goto out;

        err = z_erofs_load_cluster_from_disk(m, lcn, false);
        if (err)
                return err;

        /*
         * If the 1st NONHEAD lcluster was already handled initially without
         * a valid compressedlcs, it at least mustn't be CBLKCNT, or an
         * internal implementation error has been detected.
         *
         * The following code can handle it properly anyway, but let's
         * BUG_ON in debugging mode only so that developers notice it.
         */
        DBG_BUGON(lcn == initial_lcn &&
                  m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);

        switch (m->type) {
        case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
        case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
                /*
                 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
                 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
                 */
                m->compressedlcs = 1;
                break;
        case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
                if (m->delta[0] != 1)
                        goto err_bonus_cblkcnt;
                if (m->compressedlcs)
                        break;
                fallthrough;
        default:
                erofs_err(m->inode->i_sb,
                          "cannot find CBLKCNT @ lcn %lu of nid %llu",
                          lcn, vi->nid);
                DBG_BUGON(1);
                return -EFSCORRUPTED;
        }
out:
        map->m_plen = m->compressedlcs << lclusterbits;
        return 0;
err_bonus_cblkcnt:
        erofs_err(m->inode->i_sb,
                  "bogus CBLKCNT @ lcn %lu of nid %llu",
                  lcn, vi->nid);
        DBG_BUGON(1);
        return -EFSCORRUPTED;
}

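/*
 * Extend m_llen up to the next HEAD lcluster (or EOF) so that the whole
 * decompressed extent is reported, as needed for FIEMAP.
 */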
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
        struct inode *inode = m->inode;
        struct erofs_inode *vi = EROFS_I(inode);
        struct erofs_map_blocks *map = m->map;
        unsigned int lclusterbits = vi->z_logical_clusterbits;
        u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
        int err;

        do {
                /* handle the last EOF pcluster (no next HEAD lcluster) */
                if ((lcn << lclusterbits) >= inode->i_size) {
                        map->m_llen = inode->i_size - map->m_la;
                        return 0;
                }

                err = z_erofs_load_cluster_from_disk(m, lcn, true);
                if (err)
                        return err;

                if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
                        DBG_BUGON(!m->delta[1] &&
                                  m->clusterofs != 1 << lclusterbits);
                } else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
                           m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD) {
                        /* go on until the next HEAD lcluster */
                        if (lcn != headlcn)
                                break;
                        m->delta[1] = 1;
                } else {
                        erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
                                  m->type, lcn, vi->nid);
                        DBG_BUGON(1);
                        return -EOPNOTSUPP;
                }
                lcn += m->delta[1];
        } while (m->delta[1]);

        map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
        return 0;
}

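/*
 * Map the logical extent at map->m_la to its physical counterpart: fill in
 * m_la/m_llen for the logical extent, m_pa/m_plen for the pcluster, and the
 * EROFS_MAP_* flags describing whether it is mapped, compressed and fully
 * mapped.
 */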
int z_erofs_map_blocks_iter(struct inode *inode,
                            struct erofs_map_blocks *map,
                            int flags)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct z_erofs_maprecorder m = {
                .inode = inode,
                .map = map,
        };
        int err = 0;
        unsigned int lclusterbits, endoff;
        unsigned long initial_lcn;
        unsigned long long ofs, end;

        trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

        /* when trying to read beyond EOF, leave it unmapped */
        if (map->m_la >= inode->i_size) {
                map->m_llen = map->m_la + 1 - inode->i_size;
                map->m_la = inode->i_size;
                map->m_flags = 0;
                goto out;
        }

        err = z_erofs_fill_inode_lazy(inode);
        if (err)
                goto out;

        lclusterbits = vi->z_logical_clusterbits;
        ofs = map->m_la;
        initial_lcn = ofs >> lclusterbits;
        endoff = ofs & ((1 << lclusterbits) - 1);

        err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
        if (err)
                goto unmap_out;

        map->m_flags = EROFS_MAP_ZIPPED;        /* by default, compressed */
        end = (m.lcn + 1ULL) << lclusterbits;

        switch (m.type) {
        case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
                if (endoff >= m.clusterofs)
                        map->m_flags &= ~EROFS_MAP_ZIPPED;
                fallthrough;
        case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
                if (endoff >= m.clusterofs) {
                        map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
                        break;
                }
                /* m.lcn should be >= 1 if endoff < m.clusterofs */
                if (!m.lcn) {
                        erofs_err(inode->i_sb,
                                  "invalid logical cluster 0 at nid %llu",
                                  vi->nid);
                        err = -EFSCORRUPTED;
                        goto unmap_out;
                }
                end = (m.lcn << lclusterbits) | m.clusterofs;
                map->m_flags |= EROFS_MAP_FULL_MAPPED;
                m.delta[0] = 1;
                fallthrough;
        case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
                /* get the corresponding first chunk */
                err = z_erofs_extent_lookback(&m, m.delta[0]);
                if (err)
                        goto unmap_out;
                break;
        default:
                erofs_err(inode->i_sb,
                          "unknown type %u @ offset %llu of nid %llu",
                          m.type, ofs, vi->nid);
                err = -EOPNOTSUPP;
                goto unmap_out;
        }

        map->m_llen = end - map->m_la;
        map->m_pa = blknr_to_addr(m.pblk);
        map->m_flags |= EROFS_MAP_MAPPED;

        err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
        if (err)
                goto out;

        if (flags & EROFS_GET_BLOCKS_FIEMAP) {
                err = z_erofs_get_extent_decompressedlen(&m);
                if (!err)
                        map->m_flags |= EROFS_MAP_FULL_MAPPED;
        }
unmap_out:
        if (m.kaddr)
                kunmap_atomic(m.kaddr);

out:
        erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
                  __func__, map->m_la, map->m_pa,
                  map->m_llen, map->m_plen, map->m_flags);

        trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

        /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
        DBG_BUGON(err < 0 && err != -ENOMEM);
        return err;
}

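/*
 * iomap_begin hook used for reporting only (e.g. fiemap): translate an
 * erofs_map_blocks result into an iomap extent without performing any I/O.
 */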
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
                                loff_t length, unsigned int flags,
                                struct iomap *iomap, struct iomap *srcmap)
{
        int ret;
        struct erofs_map_blocks map = { .m_la = offset };

        ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
        if (map.mpage)
                put_page(map.mpage);
        if (ret < 0)
                return ret;

        iomap->bdev = inode->i_sb->s_bdev;
        iomap->offset = map.m_la;
        iomap->length = map.m_llen;
        if (map.m_flags & EROFS_MAP_MAPPED) {
                iomap->type = IOMAP_MAPPED;
                iomap->addr = map.m_pa;
        } else {
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                /*
                 * There is no strict rule on how to describe extents past
                 * EOF, but we need to do it as below; otherwise, iomap
                 * itself would get into an endless loop on post-EOF extents.
                 */
                if (iomap->offset >= inode->i_size)
                        iomap->length = length + map.m_la - offset;
        }
        iomap->flags = 0;
        return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
        .iomap_begin = z_erofs_iomap_begin_report,
};