linux/fs/erofs/zdata.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2018 HUAWEI, Inc.
   4 *             https://www.huawei.com/
   5 * Created by Gao Xiang <gaoxiang25@huawei.com>
   6 */
   7#include "zdata.h"
   8#include "compress.h"
   9#include <linux/prefetch.h>
  10
  11#include <trace/events/erofs.h>
  12
  13/*
   14 * a compressed_pages[] placeholder to avoid the slot
   15 * being filled with file pages for in-place decompression.
  16 */
  17#define PAGE_UNALLOCATED     ((void *)0x5F0E4B1D)
  18
  19/* how to allocate cached pages for a pcluster */
  20enum z_erofs_cache_alloctype {
  21        DONTALLOC,      /* don't allocate any cached pages */
  22        DELAYEDALLOC,   /* delayed allocation (at the time of submitting io) */
  23};
  24
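     /*
      * With DELAYEDALLOC, compressed_pages[] slots are filled with the
      * PAGE_UNALLOCATED placeholder by preload_compressed_pages() so that
      * the actual cache pages can be allocated later, at I/O submission
      * time, in pickup_page_for_submission().
      */
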
  25/*
  26 * tagged pointer with 1-bit tag for all compressed pages
   27 * tag 1 - the page has just been found with an extra page reference
  28 */
  29typedef tagptr1_t compressed_page_t;
  30
  31#define tag_compressed_page_justfound(page) \
  32        tagptr_fold(compressed_page_t, page, 1)
  33
  34static struct workqueue_struct *z_erofs_workqueue __read_mostly;
  35static struct kmem_cache *pcluster_cachep __read_mostly;
  36
  37void z_erofs_exit_zip_subsystem(void)
  38{
  39        destroy_workqueue(z_erofs_workqueue);
  40        kmem_cache_destroy(pcluster_cachep);
  41}
  42
  43static inline int z_erofs_init_workqueue(void)
  44{
  45        const unsigned int onlinecpus = num_possible_cpus();
  46
  47        /*
   48         * no need to spawn too many threads, limiting threads can minimize
   49         * scheduling overhead; perhaps per-CPU threads would be better?
  50         */
  51        z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
  52                                            WQ_UNBOUND | WQ_HIGHPRI,
  53                                            onlinecpus + onlinecpus / 4);
  54        return z_erofs_workqueue ? 0 : -ENOMEM;
  55}
  56
  57static void z_erofs_pcluster_init_once(void *ptr)
  58{
  59        struct z_erofs_pcluster *pcl = ptr;
  60        struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
  61        unsigned int i;
  62
  63        mutex_init(&cl->lock);
  64        cl->nr_pages = 0;
  65        cl->vcnt = 0;
  66        for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
  67                pcl->compressed_pages[i] = NULL;
  68}
  69
  70int __init z_erofs_init_zip_subsystem(void)
  71{
  72        pcluster_cachep = kmem_cache_create("erofs_compress",
  73                                            Z_EROFS_WORKGROUP_SIZE, 0,
  74                                            SLAB_RECLAIM_ACCOUNT,
  75                                            z_erofs_pcluster_init_once);
  76        if (pcluster_cachep) {
  77                if (!z_erofs_init_workqueue())
  78                        return 0;
  79
  80                kmem_cache_destroy(pcluster_cachep);
  81        }
  82        return -ENOMEM;
  83}
  84
  85enum z_erofs_collectmode {
  86        COLLECT_SECONDARY,
  87        COLLECT_PRIMARY,
  88        /*
   89         * The current collection is the tail of an existing chain, and the
   90         * previously processed chained collections have all been decided to
   91         * be hooked up to it.
   92         * A new chain will be created for the remaining collections that are
   93         * not processed yet; thus, unlike COLLECT_PRIMARY_FOLLOWED,
   94         * the next collection cannot safely reuse the whole page in
   95         * the following scenario:
  96         *  ________________________________________________________________
  97         * |      tail (partial) page     |       head (partial) page       |
  98         * |   (belongs to the next cl)   |   (belongs to the current cl)   |
  99         * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
 100         */
 101        COLLECT_PRIMARY_HOOKED,
 102        COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
 103        /*
  104         * The current collection has been linked into the owned chain, and
  105         * could also be linked with the remaining collections, which means
  106         * that if the page being processed is the tail page of the collection,
  107         * the current collection can safely use the whole page (since
  108         * the previous collection is under control) for in-place I/O, as
  109         * illustrated below:
 110         *  ________________________________________________________________
 111         * |  tail (partial) page |          head (partial) page           |
 112         * |  (of the current cl) |      (of the previous collection)      |
 113         * |  PRIMARY_FOLLOWED or |                                        |
 114         * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
 115         *
 116         * [  (*) the above page can be used as inplace I/O.               ]
 117         */
 118        COLLECT_PRIMARY_FOLLOWED,
 119};
 120
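     /*
      * Note that the collect modes above are deliberately ordered: numeric
      * comparisons such as 'mode >= COLLECT_PRIMARY' (in-place I/O allowed)
      * and 'mode < COLLECT_PRIMARY_FOLLOWED' (no cache preloading) are used
      * throughout this file instead of matching each value explicitly.
      */
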
 121struct z_erofs_collector {
 122        struct z_erofs_pagevec_ctor vector;
 123
 124        struct z_erofs_pcluster *pcl, *tailpcl;
 125        struct z_erofs_collection *cl;
 126        struct page **compressedpages;
 127        z_erofs_next_pcluster_t owned_head;
 128
 129        enum z_erofs_collectmode mode;
 130};
 131
 132struct z_erofs_decompress_frontend {
 133        struct inode *const inode;
 134
 135        struct z_erofs_collector clt;
 136        struct erofs_map_blocks map;
 137
 138        bool readahead;
 139        /* used for applying cache strategy on the fly */
 140        bool backmost;
 141        erofs_off_t headoffset;
 142};
 143
 144#define COLLECTOR_INIT() { \
 145        .owned_head = Z_EROFS_PCLUSTER_TAIL, \
 146        .mode = COLLECT_PRIMARY_FOLLOWED }
 147
 148#define DECOMPRESS_FRONTEND_INIT(__i) { \
 149        .inode = __i, .clt = COLLECTOR_INIT(), \
 150        .backmost = true, }
 151
 152static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
 153static DEFINE_MUTEX(z_pagemap_global_lock);
 154
 155static void preload_compressed_pages(struct z_erofs_collector *clt,
 156                                     struct address_space *mc,
 157                                     enum z_erofs_cache_alloctype type)
 158{
 159        const struct z_erofs_pcluster *pcl = clt->pcl;
 160        const unsigned int clusterpages = BIT(pcl->clusterbits);
 161        struct page **pages = clt->compressedpages;
 162        pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);
 163        bool standalone = true;
 164
 165        if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
 166                return;
 167
 168        for (; pages < pcl->compressed_pages + clusterpages; ++pages) {
 169                struct page *page;
 170                compressed_page_t t;
 171
 172                /* the compressed page was loaded before */
 173                if (READ_ONCE(*pages))
 174                        continue;
 175
 176                page = find_get_page(mc, index);
 177
 178                if (page) {
 179                        t = tag_compressed_page_justfound(page);
 180                } else if (type == DELAYEDALLOC) {
 181                        t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
 182                } else {        /* DONTALLOC */
 183                        if (standalone)
 184                                clt->compressedpages = pages;
 185                        standalone = false;
 186                        continue;
 187                }
 188
 189                if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
 190                        continue;
 191
 192                if (page)
 193                        put_page(page);
 194        }
 195
 196        if (standalone)         /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */
 197                clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
 198}
 199
 200/* called by erofs_shrinker to get rid of all compressed_pages */
 201int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 202                                       struct erofs_workgroup *grp)
 203{
 204        struct z_erofs_pcluster *const pcl =
 205                container_of(grp, struct z_erofs_pcluster, obj);
 206        struct address_space *const mapping = MNGD_MAPPING(sbi);
 207        const unsigned int clusterpages = BIT(pcl->clusterbits);
 208        int i;
 209
 210        /*
  211         * the refcount of the workgroup is now frozen at 1,
  212         * therefore there is no need to worry about concurrent decompression users.
 213         */
 214        for (i = 0; i < clusterpages; ++i) {
 215                struct page *page = pcl->compressed_pages[i];
 216
 217                if (!page)
 218                        continue;
 219
 220                /* block other users from reclaiming or migrating the page */
 221                if (!trylock_page(page))
 222                        return -EBUSY;
 223
 224                if (page->mapping != mapping)
 225                        continue;
 226
 227                /* barrier is implied in the following 'unlock_page' */
 228                WRITE_ONCE(pcl->compressed_pages[i], NULL);
 229                set_page_private(page, 0);
 230                ClearPagePrivate(page);
 231
 232                unlock_page(page);
 233                put_page(page);
 234        }
 235        return 0;
 236}
 237
 238int erofs_try_to_free_cached_page(struct address_space *mapping,
 239                                  struct page *page)
 240{
 241        struct z_erofs_pcluster *const pcl = (void *)page_private(page);
 242        const unsigned int clusterpages = BIT(pcl->clusterbits);
 243        int ret = 0;    /* 0 - busy */
 244
 245        if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
 246                unsigned int i;
 247
 248                for (i = 0; i < clusterpages; ++i) {
 249                        if (pcl->compressed_pages[i] == page) {
 250                                WRITE_ONCE(pcl->compressed_pages[i], NULL);
 251                                ret = 1;
 252                                break;
 253                        }
 254                }
 255                erofs_workgroup_unfreeze(&pcl->obj, 1);
 256
 257                if (ret) {
 258                        ClearPagePrivate(page);
 259                        put_page(page);
 260                }
 261        }
 262        return ret;
 263}
 264
 265/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
 266static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
 267                                          struct page *page)
 268{
 269        struct z_erofs_pcluster *const pcl = clt->pcl;
 270        const unsigned int clusterpages = BIT(pcl->clusterbits);
 271
 272        while (clt->compressedpages < pcl->compressed_pages + clusterpages) {
 273                if (!cmpxchg(clt->compressedpages++, NULL, page))
 274                        return true;
 275        }
 276        return false;
 277}
 278
  279/* callers must hold the collection lock */
 280static int z_erofs_attach_page(struct z_erofs_collector *clt,
 281                               struct page *page,
 282                               enum z_erofs_page_type type)
 283{
 284        int ret;
 285        bool occupied;
 286
  287        /* give priority to in-place I/O */
 288        if (clt->mode >= COLLECT_PRIMARY &&
 289            type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
 290            z_erofs_try_inplace_io(clt, page))
 291                return 0;
 292
 293        ret = z_erofs_pagevec_enqueue(&clt->vector,
 294                                      page, type, &occupied);
 295        clt->cl->vcnt += (unsigned int)ret;
 296
 297        return ret ? 0 : -EAGAIN;
 298}
 299
 300static enum z_erofs_collectmode
 301try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
 302                      z_erofs_next_pcluster_t *owned_head)
 303{
  304        /* let's claim the following types of pclusters */
 305retry:
 306        if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
 307                /* type 1, nil pcluster */
 308                if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
 309                            *owned_head) != Z_EROFS_PCLUSTER_NIL)
 310                        goto retry;
 311
 312                *owned_head = &pcl->next;
 313                /* lucky, I am the followee :) */
 314                return COLLECT_PRIMARY_FOLLOWED;
 315        } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
 316                /*
  317                 * type 2, link to the end of an existing open chain;
  318                 * be careful that its submission itself is governed
  319                 * by the original owned chain.
 320                 */
 321                if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
 322                            *owned_head) != Z_EROFS_PCLUSTER_TAIL)
 323                        goto retry;
 324                *owned_head = Z_EROFS_PCLUSTER_TAIL;
 325                return COLLECT_PRIMARY_HOOKED;
 326        }
 327        return COLLECT_PRIMARY; /* :( better luck next time */
 328}
 329
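     /*
      * Attach to an already registered pcluster: sanity-check pageofs and
      * the recorded length against the new mapping, possibly extend
      * pcl->length, take the collection mutex, and then claim the pcluster
      * into the current chain via try_to_claim_pcluster().
      */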
 330static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
 331                                     struct inode *inode,
 332                                     struct erofs_map_blocks *map)
 333{
 334        struct z_erofs_pcluster *pcl = clt->pcl;
 335        struct z_erofs_collection *cl;
 336        unsigned int length;
 337
  338        /* to avoid unexpected loops formed by corrupted images */
 339        if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
 340                DBG_BUGON(1);
 341                return -EFSCORRUPTED;
 342        }
 343
 344        cl = z_erofs_primarycollection(pcl);
 345        if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
 346                DBG_BUGON(1);
 347                return -EFSCORRUPTED;
 348        }
 349
 350        length = READ_ONCE(pcl->length);
 351        if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
 352                if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
 353                        DBG_BUGON(1);
 354                        return -EFSCORRUPTED;
 355                }
 356        } else {
 357                unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
 358
 359                if (map->m_flags & EROFS_MAP_FULL_MAPPED)
 360                        llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;
 361
 362                while (llen > length &&
 363                       length != cmpxchg_relaxed(&pcl->length, length, llen)) {
 364                        cpu_relax();
 365                        length = READ_ONCE(pcl->length);
 366                }
 367        }
 368        mutex_lock(&cl->lock);
 369        /* used to check tail merging loop due to corrupted images */
 370        if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 371                clt->tailpcl = pcl;
 372        clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head);
 373        /* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */
 374        if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 375                clt->tailpcl = NULL;
 376        clt->cl = cl;
 377        return 0;
 378}
 379
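     /*
      * Allocate and initialize a brand-new pcluster for the current mapping,
      * then register it via erofs_insert_workgroup().  If another pcluster
      * was registered first, back out with -EEXIST so that the caller can
      * fall back to z_erofs_lookup_collection() on the existing one.
      */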
 380static int z_erofs_register_collection(struct z_erofs_collector *clt,
 381                                       struct inode *inode,
 382                                       struct erofs_map_blocks *map)
 383{
 384        struct z_erofs_pcluster *pcl;
 385        struct z_erofs_collection *cl;
 386        struct erofs_workgroup *grp;
 387        int err;
 388
 389        /* no available workgroup, let's allocate one */
 390        pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
 391        if (!pcl)
 392                return -ENOMEM;
 393
 394        atomic_set(&pcl->obj.refcount, 1);
 395        pcl->obj.index = map->m_pa >> PAGE_SHIFT;
 396
 397        pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
 398                (map->m_flags & EROFS_MAP_FULL_MAPPED ?
 399                        Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
 400
 401        if (map->m_flags & EROFS_MAP_ZIPPED)
 402                pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4;
 403        else
 404                pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
 405
 406        pcl->clusterbits = EROFS_I(inode)->z_physical_clusterbits[0];
 407        pcl->clusterbits -= PAGE_SHIFT;
 408
 409        /* new pclusters should be claimed as type 1, primary and followed */
 410        pcl->next = clt->owned_head;
 411        clt->mode = COLLECT_PRIMARY_FOLLOWED;
 412
 413        cl = z_erofs_primarycollection(pcl);
 414
 415        /* must be cleaned before freeing to slab */
 416        DBG_BUGON(cl->nr_pages);
 417        DBG_BUGON(cl->vcnt);
 418
 419        cl->pageofs = map->m_la & ~PAGE_MASK;
 420
 421        /*
  422         * lock all primary followed works before they become visible to others;
  423         * also, mutex_trylock *never* fails for a new pcluster.
 424         */
 425        DBG_BUGON(!mutex_trylock(&cl->lock));
 426
 427        grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj);
 428        if (IS_ERR(grp)) {
 429                err = PTR_ERR(grp);
 430                goto err_out;
 431        }
 432
 433        if (grp != &pcl->obj) {
 434                clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
 435                err = -EEXIST;
 436                goto err_out;
 437        }
  438        /* used to check for tail merging loops caused by corrupted images */
 439        if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 440                clt->tailpcl = pcl;
 441        clt->owned_head = &pcl->next;
 442        clt->pcl = pcl;
 443        clt->cl = cl;
 444        return 0;
 445
 446err_out:
 447        mutex_unlock(&cl->lock);
 448        kmem_cache_free(pcluster_cachep, pcl);
 449        return err;
 450}
 451
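     /*
      * Find (or register) the pcluster covering map->m_pa and bind the
      * collector to it, then set up the inline pagevec constructor and the
      * compressedpages cursor used for in-place I/O.
      */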
 452static int z_erofs_collector_begin(struct z_erofs_collector *clt,
 453                                   struct inode *inode,
 454                                   struct erofs_map_blocks *map)
 455{
 456        struct erofs_workgroup *grp;
 457        int ret;
 458
 459        DBG_BUGON(clt->cl);
 460
  461        /* must be Z_EROFS_PCLUSTER_TAIL or point to a previous collection */
 462        DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
 463        DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
 464
 465        if (!PAGE_ALIGNED(map->m_pa)) {
 466                DBG_BUGON(1);
 467                return -EINVAL;
 468        }
 469
 470        grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
 471        if (grp) {
 472                clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
 473        } else {
 474                ret = z_erofs_register_collection(clt, inode, map);
 475
 476                if (!ret)
 477                        goto out;
 478                if (ret != -EEXIST)
 479                        return ret;
 480        }
 481
 482        ret = z_erofs_lookup_collection(clt, inode, map);
 483        if (ret) {
 484                erofs_workgroup_put(&clt->pcl->obj);
 485                return ret;
 486        }
 487
 488out:
 489        z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
 490                                  clt->cl->pagevec, clt->cl->vcnt);
 491
 492        clt->compressedpages = clt->pcl->compressed_pages;
 493        if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
 494                clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
 495        return 0;
 496}
 497
  498/*
  499 * keep in mind that no referenced pclusters will be freed;
  500 * freeing only happens after an RCU grace period.
  501 */
 502static void z_erofs_rcu_callback(struct rcu_head *head)
 503{
 504        struct z_erofs_collection *const cl =
 505                container_of(head, struct z_erofs_collection, rcu);
 506
 507        kmem_cache_free(pcluster_cachep,
 508                        container_of(cl, struct z_erofs_pcluster,
 509                                     primary_collection));
 510}
 511
 512void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
 513{
 514        struct z_erofs_pcluster *const pcl =
 515                container_of(grp, struct z_erofs_pcluster, obj);
 516        struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
 517
 518        call_rcu(&cl->rcu, z_erofs_rcu_callback);
 519}
 520
 521static void z_erofs_collection_put(struct z_erofs_collection *cl)
 522{
 523        struct z_erofs_pcluster *const pcl =
 524                container_of(cl, struct z_erofs_pcluster, primary_collection);
 525
 526        erofs_workgroup_put(&pcl->obj);
 527}
 528
 529static bool z_erofs_collector_end(struct z_erofs_collector *clt)
 530{
 531        struct z_erofs_collection *cl = clt->cl;
 532
 533        if (!cl)
 534                return false;
 535
 536        z_erofs_pagevec_ctor_exit(&clt->vector, false);
 537        mutex_unlock(&cl->lock);
 538
 539        /*
  540         * once all pending pages are added, don't hold the pcluster's
  541         * reference any longer if it isn't hosted by ourselves.
 542         */
 543        if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
 544                z_erofs_collection_put(cl);
 545
 546        clt->cl = NULL;
 547        return true;
 548}
 549
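     /*
      * Decide whether compressed pages should also be cached in the managed
      * mapping: never when caching is disabled, always for the backmost
      * extent, and under the readaround strategy also for extents that start
      * before the original request offset.
      */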
 550static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
 551                                       unsigned int cachestrategy,
 552                                       erofs_off_t la)
 553{
 554        if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
 555                return false;
 556
 557        if (fe->backmost)
 558                return true;
 559
 560        return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
 561                la < fe->headoffset;
 562}
 563
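     /*
      * Read one file page: walk it from the end towards the start, mapping
      * each logical extent it spans, attaching the page (or an extra staging
      * page) to the matching collection and zeroing unmapped holes, so a
      * single page may end up split across several pclusters.
      */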
 564static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 565                                struct page *page)
 566{
 567        struct inode *const inode = fe->inode;
 568        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 569        struct erofs_map_blocks *const map = &fe->map;
 570        struct z_erofs_collector *const clt = &fe->clt;
 571        const loff_t offset = page_offset(page);
 572        bool tight = true;
 573
 574        enum z_erofs_cache_alloctype cache_strategy;
 575        enum z_erofs_page_type page_type;
 576        unsigned int cur, end, spiltted, index;
 577        int err = 0;
 578
 579        /* register locked file pages as online pages in pack */
 580        z_erofs_onlinepage_init(page);
 581
 582        spiltted = 0;
 583        end = PAGE_SIZE;
 584repeat:
 585        cur = end - 1;
 586
 587        /* lucky, within the range of the current map_blocks */
 588        if (offset + cur >= map->m_la &&
 589            offset + cur < map->m_la + map->m_llen) {
 590                /* didn't get a valid collection previously (very rare) */
 591                if (!clt->cl)
 592                        goto restart_now;
 593                goto hitted;
 594        }
 595
  596        /* go ahead to the next map_blocks */
 597        erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
 598
 599        if (z_erofs_collector_end(clt))
 600                fe->backmost = false;
 601
 602        map->m_la = offset + cur;
 603        map->m_llen = 0;
 604        err = z_erofs_map_blocks_iter(inode, map, 0);
 605        if (err)
 606                goto err_out;
 607
 608restart_now:
 609        if (!(map->m_flags & EROFS_MAP_MAPPED))
 610                goto hitted;
 611
 612        err = z_erofs_collector_begin(clt, inode, map);
 613        if (err)
 614                goto err_out;
 615
 616        /* preload all compressed pages (maybe downgrade role if necessary) */
 617        if (should_alloc_managed_pages(fe, sbi->ctx.cache_strategy, map->m_la))
 618                cache_strategy = DELAYEDALLOC;
 619        else
 620                cache_strategy = DONTALLOC;
 621
 622        preload_compressed_pages(clt, MNGD_MAPPING(sbi), cache_strategy);
 623
 624hitted:
 625        /*
  626         * Ensure the current partial page belongs to this submit chain rather
  627         * than other concurrent submit chains or the noio(bypass) chain, since
  628         * those chains are handled asynchronously and thus the page cannot be
  629         * used for in-place I/O or pagevec (which must be processed in strict order).
 630         */
 631        tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
 632                  clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
 633
 634        cur = end - min_t(unsigned int, offset + end - map->m_la, end);
 635        if (!(map->m_flags & EROFS_MAP_MAPPED)) {
 636                zero_user_segment(page, cur, end);
 637                goto next_part;
 638        }
 639
 640        /* let's derive page type */
 641        page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
 642                (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
 643                        (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
 644                                Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
 645
 646        if (cur)
 647                tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
 648
 649retry:
 650        err = z_erofs_attach_page(clt, page, page_type);
 651        /* should allocate an additional staging page for pagevec */
 652        if (err == -EAGAIN) {
 653                struct page *const newpage =
 654                                alloc_page(GFP_NOFS | __GFP_NOFAIL);
 655
 656                newpage->mapping = Z_EROFS_MAPPING_STAGING;
 657                err = z_erofs_attach_page(clt, newpage,
 658                                          Z_EROFS_PAGE_TYPE_EXCLUSIVE);
 659                if (!err)
 660                        goto retry;
 661        }
 662
 663        if (err)
 664                goto err_out;
 665
 666        index = page->index - (map->m_la >> PAGE_SHIFT);
 667
 668        z_erofs_onlinepage_fixup(page, index, true);
 669
 670        /* bump up the number of spiltted parts of a page */
 671        ++spiltted;
 672        /* also update nr_pages */
 673        clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
 674next_part:
 675        /* can be used for verification */
 676        map->m_llen = offset + cur - map->m_la;
 677
 678        end = cur;
 679        if (end > 0)
 680                goto repeat;
 681
 682out:
 683        z_erofs_onlinepage_endio(page);
 684
 685        erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
 686                  __func__, page, spiltted, map->m_llen);
 687        return err;
 688
 689        /* if some error occurred while processing this page */
 690err_out:
 691        SetPageError(page);
 692        goto out;
 693}
 694
 695static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 696                                       bool sync, int bios)
 697{
 698        /* wake up the caller thread for sync decompression */
 699        if (sync) {
 700                unsigned long flags;
 701
 702                spin_lock_irqsave(&io->u.wait.lock, flags);
 703                if (!atomic_add_return(bios, &io->pending_bios))
 704                        wake_up_locked(&io->u.wait);
 705                spin_unlock_irqrestore(&io->u.wait.lock, flags);
 706                return;
 707        }
 708
 709        if (!atomic_add_return(bios, &io->pending_bios))
 710                queue_work(z_erofs_workqueue, &io->u.work);
 711}
 712
 713static void z_erofs_decompressqueue_endio(struct bio *bio)
 714{
 715        tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
 716        struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
 717        blk_status_t err = bio->bi_status;
 718        struct bio_vec *bvec;
 719        struct bvec_iter_all iter_all;
 720
 721        bio_for_each_segment_all(bvec, bio, iter_all) {
 722                struct page *page = bvec->bv_page;
 723
 724                DBG_BUGON(PageUptodate(page));
 725                DBG_BUGON(!page->mapping);
 726
 727                if (err)
 728                        SetPageError(page);
 729
 730                if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
 731                        if (!err)
 732                                SetPageUptodate(page);
 733                        unlock_page(page);
 734                }
 735        }
 736        z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
 737        bio_put(bio);
 738}
 739
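     /*
      * Decompress a single pcluster: gather its output pages from the
      * pagevec, pick up in-place (overlapped) compressed pages, run the
      * decompressor, then recycle staging pages and end all online pages
      * while resetting the collection for later reuse.
      */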
 740static int z_erofs_decompress_pcluster(struct super_block *sb,
 741                                       struct z_erofs_pcluster *pcl,
 742                                       struct list_head *pagepool)
 743{
 744        struct erofs_sb_info *const sbi = EROFS_SB(sb);
 745        const unsigned int clusterpages = BIT(pcl->clusterbits);
 746        struct z_erofs_pagevec_ctor ctor;
 747        unsigned int i, outputsize, llen, nr_pages;
 748        struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
 749        struct page **pages, **compressed_pages, *page;
 750
 751        enum z_erofs_page_type page_type;
 752        bool overlapped, partial;
 753        struct z_erofs_collection *cl;
 754        int err;
 755
 756        might_sleep();
 757        cl = z_erofs_primarycollection(pcl);
 758        DBG_BUGON(!READ_ONCE(cl->nr_pages));
 759
 760        mutex_lock(&cl->lock);
 761        nr_pages = cl->nr_pages;
 762
 763        if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
 764                pages = pages_onstack;
 765        } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
 766                   mutex_trylock(&z_pagemap_global_lock)) {
 767                pages = z_pagemap_global;
 768        } else {
 769                gfp_t gfp_flags = GFP_KERNEL;
 770
 771                if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
 772                        gfp_flags |= __GFP_NOFAIL;
 773
 774                pages = kvmalloc_array(nr_pages, sizeof(struct page *),
 775                                       gfp_flags);
 776
 777                /* fallback to global pagemap for the lowmem scenario */
 778                if (!pages) {
 779                        mutex_lock(&z_pagemap_global_lock);
 780                        pages = z_pagemap_global;
 781                }
 782        }
 783
 784        for (i = 0; i < nr_pages; ++i)
 785                pages[i] = NULL;
 786
 787        err = 0;
 788        z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
 789                                  cl->pagevec, 0);
 790
 791        for (i = 0; i < cl->vcnt; ++i) {
 792                unsigned int pagenr;
 793
 794                page = z_erofs_pagevec_dequeue(&ctor, &page_type);
 795
 796                /* all pages in pagevec ought to be valid */
 797                DBG_BUGON(!page);
 798                DBG_BUGON(!page->mapping);
 799
 800                if (z_erofs_put_stagingpage(pagepool, page))
 801                        continue;
 802
 803                if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
 804                        pagenr = 0;
 805                else
 806                        pagenr = z_erofs_onlinepage_index(page);
 807
 808                DBG_BUGON(pagenr >= nr_pages);
 809
 810                /*
  811                 * currently EROFS doesn't support multiref (dedup),
  812                 * so error out here on a multiref page.
 813                 */
 814                if (pages[pagenr]) {
 815                        DBG_BUGON(1);
 816                        SetPageError(pages[pagenr]);
 817                        z_erofs_onlinepage_endio(pages[pagenr]);
 818                        err = -EFSCORRUPTED;
 819                }
 820                pages[pagenr] = page;
 821        }
 822        z_erofs_pagevec_ctor_exit(&ctor, true);
 823
 824        overlapped = false;
 825        compressed_pages = pcl->compressed_pages;
 826
 827        for (i = 0; i < clusterpages; ++i) {
 828                unsigned int pagenr;
 829
 830                page = compressed_pages[i];
 831
 832                /* all compressed pages ought to be valid */
 833                DBG_BUGON(!page);
 834                DBG_BUGON(!page->mapping);
 835
 836                if (!z_erofs_page_is_staging(page)) {
 837                        if (erofs_page_is_managed(sbi, page)) {
 838                                if (!PageUptodate(page))
 839                                        err = -EIO;
 840                                continue;
 841                        }
 842
 843                        /*
  844                         * only non-head pages can be selected
  845                         * for in-place decompression
 846                         */
 847                        pagenr = z_erofs_onlinepage_index(page);
 848
 849                        DBG_BUGON(pagenr >= nr_pages);
 850                        if (pages[pagenr]) {
 851                                DBG_BUGON(1);
 852                                SetPageError(pages[pagenr]);
 853                                z_erofs_onlinepage_endio(pages[pagenr]);
 854                                err = -EFSCORRUPTED;
 855                        }
 856                        pages[pagenr] = page;
 857
 858                        overlapped = true;
 859                }
 860
  861                /* PG_error needs checking for in-place and staging pages */
 862                if (PageError(page)) {
 863                        DBG_BUGON(PageUptodate(page));
 864                        err = -EIO;
 865                }
 866        }
 867
 868        if (err)
 869                goto out;
 870
 871        llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
 872        if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
 873                outputsize = llen;
 874                partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
 875        } else {
 876                outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
 877                partial = true;
 878        }
 879
 880        err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
 881                                        .sb = sb,
 882                                        .in = compressed_pages,
 883                                        .out = pages,
 884                                        .pageofs_out = cl->pageofs,
 885                                        .inputsize = PAGE_SIZE,
 886                                        .outputsize = outputsize,
 887                                        .alg = pcl->algorithmformat,
 888                                        .inplace_io = overlapped,
 889                                        .partial_decoding = partial
 890                                 }, pagepool);
 891
 892out:
  893        /* must handle all compressed pages before ending the output pages */
 894        for (i = 0; i < clusterpages; ++i) {
 895                page = compressed_pages[i];
 896
 897                if (erofs_page_is_managed(sbi, page))
 898                        continue;
 899
 900                /* recycle all individual staging pages */
 901                (void)z_erofs_put_stagingpage(pagepool, page);
 902
 903                WRITE_ONCE(compressed_pages[i], NULL);
 904        }
 905
 906        for (i = 0; i < nr_pages; ++i) {
 907                page = pages[i];
 908                if (!page)
 909                        continue;
 910
 911                DBG_BUGON(!page->mapping);
 912
 913                /* recycle all individual staging pages */
 914                if (z_erofs_put_stagingpage(pagepool, page))
 915                        continue;
 916
 917                if (err < 0)
 918                        SetPageError(page);
 919
 920                z_erofs_onlinepage_endio(page);
 921        }
 922
 923        if (pages == z_pagemap_global)
 924                mutex_unlock(&z_pagemap_global_lock);
 925        else if (pages != pages_onstack)
 926                kvfree(pages);
 927
 928        cl->nr_pages = 0;
 929        cl->vcnt = 0;
 930
 931        /* all cl locks MUST be taken before the following line */
 932        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
 933
 934        /* all cl locks SHOULD be released right now */
 935        mutex_unlock(&cl->lock);
 936
 937        z_erofs_collection_put(cl);
 938        return err;
 939}
 940
 941static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
 942                                     struct list_head *pagepool)
 943{
 944        z_erofs_next_pcluster_t owned = io->head;
 945
 946        while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
 947                struct z_erofs_pcluster *pcl;
 948
  949                /* it's impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
 950                DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
 951
  952                /* it's impossible that 'owned' equals NULL */
 953                DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
 954
 955                pcl = container_of(owned, struct z_erofs_pcluster, next);
 956                owned = READ_ONCE(pcl->next);
 957
 958                z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
 959        }
 960}
 961
 962static void z_erofs_decompressqueue_work(struct work_struct *work)
 963{
 964        struct z_erofs_decompressqueue *bgq =
 965                container_of(work, struct z_erofs_decompressqueue, u.work);
 966        LIST_HEAD(pagepool);
 967
 968        DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
 969        z_erofs_decompress_queue(bgq, &pagepool);
 970
 971        put_pages_list(&pagepool);
 972        kvfree(bgq);
 973}
 974
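     /*
      * Grab the compressed page for slot @nr at submission time.  It may be
      * a cached page found earlier (and possibly truncated since then), the
      * PAGE_UNALLOCATED placeholder, or still empty; in the latter cases a
      * page is allocated and either added to the managed cache or used as a
      * temporary staging page.
      */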
 975static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 976                                               unsigned int nr,
 977                                               struct list_head *pagepool,
 978                                               struct address_space *mc,
 979                                               gfp_t gfp)
 980{
 981        const pgoff_t index = pcl->obj.index;
 982        bool tocache = false;
 983
 984        struct address_space *mapping;
 985        struct page *oldpage, *page;
 986
 987        compressed_page_t t;
 988        int justfound;
 989
 990repeat:
 991        page = READ_ONCE(pcl->compressed_pages[nr]);
 992        oldpage = page;
 993
 994        if (!page)
 995                goto out_allocpage;
 996
 997        /*
  998         * the cached page has not been allocated yet and
  999         * a placeholder is out there, so prepare it now.
1000         */
1001        if (page == PAGE_UNALLOCATED) {
1002                tocache = true;
1003                goto out_allocpage;
1004        }
1005
1006        /* process the target tagged pointer */
1007        t = tagptr_init(compressed_page_t, page);
1008        justfound = tagptr_unfold_tags(t);
1009        page = tagptr_unfold_ptr(t);
1010
1011        mapping = READ_ONCE(page->mapping);
1012
1013        /*
 1014         * unmanaged (file) pages are all firmly locked,
 1015         * therefore it is impossible for `mapping' to be NULL.
1016         */
1017        if (mapping && mapping != mc)
1018                /* ought to be unmanaged pages */
1019                goto out;
1020
1021        lock_page(page);
1022
1023        /* only true if page reclaim goes wrong, should never happen */
1024        DBG_BUGON(justfound && PagePrivate(page));
1025
 1026        /* the page is still in the managed cache */
1027        if (page->mapping == mc) {
1028                WRITE_ONCE(pcl->compressed_pages[nr], page);
1029
1030                ClearPageError(page);
1031                if (!PagePrivate(page)) {
1032                        /*
 1033                         * under the current restriction, a page that is
 1034                         * already in compressed_pages[] can never be
 1035                         * !PagePrivate(page), so it must have just been found.
1036                         */
1037                        DBG_BUGON(!justfound);
1038
1039                        justfound = 0;
1040                        set_page_private(page, (unsigned long)pcl);
1041                        SetPagePrivate(page);
1042                }
1043
1044                /* no need to submit io if it is already up-to-date */
1045                if (PageUptodate(page)) {
1046                        unlock_page(page);
1047                        page = NULL;
1048                }
1049                goto out;
1050        }
1051
1052        /*
 1053         * the managed page has been truncated, so it's unsafe to
 1054         * reuse it; let's allocate a new cache-managed page.
1055         */
1056        DBG_BUGON(page->mapping);
1057        DBG_BUGON(!justfound);
1058
1059        tocache = true;
1060        unlock_page(page);
1061        put_page(page);
1062out_allocpage:
1063        page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
1064        if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1065                /* non-LRU / non-movable temporary page is needed */
1066                page->mapping = Z_EROFS_MAPPING_STAGING;
1067                tocache = false;
1068        }
1069
1070        if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
1071                if (tocache) {
 1072                        /* since it was added to the managed cache successfully */
1073                        unlock_page(page);
1074                        put_page(page);
1075                } else {
1076                        list_add(&page->lru, pagepool);
1077                }
1078                cond_resched();
1079                goto repeat;
1080        }
1081
1082        if (tocache) {
1083                set_page_private(page, (unsigned long)pcl);
1084                SetPagePrivate(page);
1085        }
1086out:    /* the only exit (for tracing and debugging) */
1087        return page;
1088}
1089
1090static struct z_erofs_decompressqueue *
1091jobqueue_init(struct super_block *sb,
1092              struct z_erofs_decompressqueue *fgq, bool *fg)
1093{
1094        struct z_erofs_decompressqueue *q;
1095
1096        if (fg && !*fg) {
1097                q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
1098                if (!q) {
1099                        *fg = true;
1100                        goto fg_out;
1101                }
1102                INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
1103        } else {
1104fg_out:
1105                q = fgq;
1106                init_waitqueue_head(&fgq->u.wait);
1107                atomic_set(&fgq->pending_bios, 0);
1108        }
1109        q->sb = sb;
1110        q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
1111        return q;
1112}
1113
1114/* define decompression jobqueue types */
1115enum {
1116        JQ_BYPASS,
1117        JQ_SUBMIT,
1118        NR_JOBQUEUES,
1119};
1120
1121static void *jobqueueset_init(struct super_block *sb,
1122                              struct z_erofs_decompressqueue *q[],
1123                              struct z_erofs_decompressqueue *fgq, bool *fg)
1124{
1125        /*
 1126         * if the managed cache is enabled, a bypass jobqueue is needed:
 1127         * there is no need to read from the device for pclusters in this queue.
1128         */
1129        q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
1130        q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
1131
1132        return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
1133}
1134
1135static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
1136                                    z_erofs_next_pcluster_t qtail[],
1137                                    z_erofs_next_pcluster_t owned_head)
1138{
1139        z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
1140        z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
1141
1142        DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1143        if (owned_head == Z_EROFS_PCLUSTER_TAIL)
1144                owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
1145
1146        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
1147
1148        WRITE_ONCE(*submit_qtail, owned_head);
1149        WRITE_ONCE(*bypass_qtail, &pcl->next);
1150
1151        qtail[JQ_BYPASS] = &pcl->next;
1152}
1153
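     /*
      * Walk the chain of owned pclusters, pick up each compressed page and
      * batch them into read bios; pclusters whose pages are all already
      * available are moved to the bypass queue so they can be decompressed
      * without any further I/O.
      */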
1154static void z_erofs_submit_queue(struct super_block *sb,
1155                                 struct z_erofs_decompress_frontend *f,
1156                                 struct list_head *pagepool,
1157                                 struct z_erofs_decompressqueue *fgq,
1158                                 bool *force_fg)
1159{
1160        struct erofs_sb_info *const sbi = EROFS_SB(sb);
1161        z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
1162        struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1163        void *bi_private;
1164        z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
 1165        /* since bio is initially NULL, there is no need to initialize last_index */
1166        pgoff_t last_index;
1167        unsigned int nr_bios = 0;
1168        struct bio *bio = NULL;
1169
1170        bi_private = jobqueueset_init(sb, q, fgq, force_fg);
1171        qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1172        qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1173
1174        /* by default, all need io submission */
1175        q[JQ_SUBMIT]->head = owned_head;
1176
1177        do {
1178                struct z_erofs_pcluster *pcl;
1179                pgoff_t cur, end;
1180                unsigned int i = 0;
1181                bool bypass = true;
1182
 1183                /* it's impossible that 'owned_head' equals the following */
1184                DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1185                DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
1186
1187                pcl = container_of(owned_head, struct z_erofs_pcluster, next);
1188
1189                cur = pcl->obj.index;
1190                end = cur + BIT(pcl->clusterbits);
1191
1192                /* close the main owned chain at first */
1193                owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
1194                                     Z_EROFS_PCLUSTER_TAIL_CLOSED);
1195
1196                do {
1197                        struct page *page;
1198
1199                        page = pickup_page_for_submission(pcl, i++, pagepool,
1200                                                          MNGD_MAPPING(sbi),
1201                                                          GFP_NOFS);
1202                        if (!page)
1203                                continue;
1204
1205                        if (bio && cur != last_index + 1) {
1206submit_bio_retry:
1207                                submit_bio(bio);
1208                                bio = NULL;
1209                        }
1210
1211                        if (!bio) {
1212                                bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
1213
1214                                bio->bi_end_io = z_erofs_decompressqueue_endio;
1215                                bio_set_dev(bio, sb->s_bdev);
1216                                bio->bi_iter.bi_sector = (sector_t)cur <<
1217                                        LOG_SECTORS_PER_BLOCK;
1218                                bio->bi_private = bi_private;
1219                                bio->bi_opf = REQ_OP_READ;
1220                                if (f->readahead)
1221                                        bio->bi_opf |= REQ_RAHEAD;
1222                                ++nr_bios;
1223                        }
1224
1225                        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
1226                                goto submit_bio_retry;
1227
1228                        last_index = cur;
1229                        bypass = false;
1230                } while (++cur < end);
1231
1232                if (!bypass)
1233                        qtail[JQ_SUBMIT] = &pcl->next;
1234                else
1235                        move_to_bypass_jobqueue(pcl, qtail, owned_head);
1236        } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
1237
1238        if (bio)
1239                submit_bio(bio);
1240
1241        /*
 1242         * although background decompression is preferred, nothing is pending
 1243         * for submission; don't issue a workqueue job, just drop the queue instead.
1244         */
1245        if (!*force_fg && !nr_bios) {
1246                kvfree(q[JQ_SUBMIT]);
1247                return;
1248        }
1249        z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
1250}
1251
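     /*
      * Submit the collected chain and decompress the bypass queue inline;
      * if foreground mode is requested (or degraded to), also wait for all
      * bios to complete and decompress the submit queue in the caller's
      * context.
      */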
1252static void z_erofs_runqueue(struct super_block *sb,
1253                             struct z_erofs_decompress_frontend *f,
1254                             struct list_head *pagepool, bool force_fg)
1255{
1256        struct z_erofs_decompressqueue io[NR_JOBQUEUES];
1257
1258        if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
1259                return;
1260        z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
1261
1262        /* handle bypass queue (no i/o pclusters) immediately */
1263        z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
1264
1265        if (!force_fg)
1266                return;
1267
1268        /* wait until all bios are completed */
1269        io_wait_event(io[JQ_SUBMIT].u.wait,
1270                      !atomic_read(&io[JQ_SUBMIT].pending_bios));
1271
1272        /* handle synchronous decompress queue in the caller context */
1273        z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
1274}
1275
1276static int z_erofs_readpage(struct file *file, struct page *page)
1277{
1278        struct inode *const inode = page->mapping->host;
1279        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1280        int err;
1281        LIST_HEAD(pagepool);
1282
1283        trace_erofs_readpage(page, false);
1284
1285        f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1286
1287        err = z_erofs_do_read_page(&f, page);
1288        (void)z_erofs_collector_end(&f.clt);
1289
 1290        /* if some compressed clusters are ready, we need to submit them anyway */
1291        z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);
1292
1293        if (err)
1294                erofs_err(inode->i_sb, "failed to read, err [%d]", err);
1295
1296        if (f.map.mpage)
1297                put_page(f.map.mpage);
1298
1299        /* clean up the remaining free pages */
1300        put_pages_list(&pagepool);
1301        return err;
1302}
1303
1304static void z_erofs_readahead(struct readahead_control *rac)
1305{
1306        struct inode *const inode = rac->mapping->host;
1307        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1308
1309        unsigned int nr_pages = readahead_count(rac);
1310        bool sync = (nr_pages <= sbi->ctx.max_sync_decompress_pages);
1311        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1312        struct page *page, *head = NULL;
1313        LIST_HEAD(pagepool);
1314
1315        trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
1316
1317        f.readahead = true;
1318        f.headoffset = readahead_pos(rac);
1319
1320        while ((page = readahead_page(rac))) {
1321                prefetchw(&page->flags);
1322
1323                /*
1324                 * A pure asynchronous readahead is indicated if
 1325                 * a PG_readahead-marked page is hit first.
1326                 * Let's also do asynchronous decompression for this case.
1327                 */
1328                sync &= !(PageReadahead(page) && !head);
1329
1330                set_page_private(page, (unsigned long)head);
1331                head = page;
1332        }
1333
1334        while (head) {
1335                struct page *page = head;
1336                int err;
1337
1338                /* traversal in reverse order */
1339                head = (void *)page_private(page);
1340
1341                err = z_erofs_do_read_page(&f, page);
1342                if (err)
1343                        erofs_err(inode->i_sb,
1344                                  "readahead error at page %lu @ nid %llu",
1345                                  page->index, EROFS_I(inode)->nid);
1346                put_page(page);
1347        }
1348
1349        (void)z_erofs_collector_end(&f.clt);
1350
1351        z_erofs_runqueue(inode->i_sb, &f, &pagepool, sync);
1352
1353        if (f.map.mpage)
1354                put_page(f.map.mpage);
1355
1356        /* clean up the remaining free pages */
1357        put_pages_list(&pagepool);
1358}
1359
1360const struct address_space_operations z_erofs_aops = {
1361        .readpage = z_erofs_readpage,
1362        .readahead = z_erofs_readahead,
1363};
1364
1365