linux/fs/erofs/zdata.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2018 HUAWEI, Inc.
   4 *             https://www.huawei.com/
   5 */
   6#include "zdata.h"
   7#include "compress.h"
   8#include <linux/prefetch.h>
   9
  10#include <trace/events/erofs.h>
  11
  12/*
   13 * since pclustersize is variable for the big pcluster feature, introduce
   14 * slab pools for different pcluster sizes.
  15 */
  16struct z_erofs_pcluster_slab {
  17        struct kmem_cache *slab;
  18        unsigned int maxpages;
  19        char name[48];
  20};
  21
  22#define _PCLP(n) { .maxpages = n }
  23
  24static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
  25        _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
  26        _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
  27};
  28
  29static void z_erofs_destroy_pcluster_pool(void)
  30{
  31        int i;
  32
  33        for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
  34                if (!pcluster_pool[i].slab)
  35                        continue;
  36                kmem_cache_destroy(pcluster_pool[i].slab);
  37                pcluster_pool[i].slab = NULL;
  38        }
  39}
  40
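     /* create one kmem_cache per supported pcluster size; roll back fully on failure */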
  41static int z_erofs_create_pcluster_pool(void)
  42{
  43        struct z_erofs_pcluster_slab *pcs;
  44        struct z_erofs_pcluster *a;
  45        unsigned int size;
  46
  47        for (pcs = pcluster_pool;
  48             pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
  49                size = struct_size(a, compressed_pages, pcs->maxpages);
  50
  51                sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
  52                pcs->slab = kmem_cache_create(pcs->name, size, 0,
  53                                              SLAB_RECLAIM_ACCOUNT, NULL);
  54                if (pcs->slab)
  55                        continue;
  56
  57                z_erofs_destroy_pcluster_pool();
  58                return -ENOMEM;
  59        }
  60        return 0;
  61}
  62
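     /* allocate a pcluster from the smallest slab pool that fits nrpages compressed pages */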
  63static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
  64{
  65        int i;
  66
  67        for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
  68                struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
  69                struct z_erofs_pcluster *pcl;
  70
  71                if (nrpages > pcs->maxpages)
  72                        continue;
  73
  74                pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
  75                if (!pcl)
  76                        return ERR_PTR(-ENOMEM);
  77                pcl->pclusterpages = nrpages;
  78                return pcl;
  79        }
  80        return ERR_PTR(-EINVAL);
  81}
  82
  83static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
  84{
  85        int i;
  86
  87        for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
  88                struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
  89
  90                if (pcl->pclusterpages > pcs->maxpages)
  91                        continue;
  92
  93                kmem_cache_free(pcs->slab, pcl);
  94                return;
  95        }
  96        DBG_BUGON(1);
  97}
  98
  99/*
 100 * a compressed_pages[] placeholder in order to avoid
 101 * being filled with file pages for in-place decompression.
 102 */
 103#define PAGE_UNALLOCATED     ((void *)0x5F0E4B1D)
 104
 105/* how to allocate cached pages for a pcluster */
 106enum z_erofs_cache_alloctype {
 107        DONTALLOC,      /* don't allocate any cached pages */
 108        DELAYEDALLOC,   /* delayed allocation (at the time of submitting io) */
 109        /*
 110         * try to use cached I/O if page allocation succeeds or fallback
 111         * to in-place I/O instead to avoid any direct reclaim.
 112         */
 113        TRYALLOC,
 114};
 115
 116/*
 117 * tagged pointer with 1-bit tag for all compressed pages
 118 * tag 0 - the page is just found with an extra page reference
 119 */
 120typedef tagptr1_t compressed_page_t;
 121
 122#define tag_compressed_page_justfound(page) \
 123        tagptr_fold(compressed_page_t, page, 1)
 124
 125static struct workqueue_struct *z_erofs_workqueue __read_mostly;
 126
 127void z_erofs_exit_zip_subsystem(void)
 128{
 129        destroy_workqueue(z_erofs_workqueue);
 130        z_erofs_destroy_pcluster_pool();
 131}
 132
 133static inline int z_erofs_init_workqueue(void)
 134{
 135        const unsigned int onlinecpus = num_possible_cpus();
 136
 137        /*
  138         * no need to spawn too many threads; limiting threads can minimize
  139         * scheduling overhead, perhaps per-CPU threads should be better?
 140         */
 141        z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
 142                                            WQ_UNBOUND | WQ_HIGHPRI,
 143                                            onlinecpus + onlinecpus / 4);
 144        return z_erofs_workqueue ? 0 : -ENOMEM;
 145}
 146
 147int __init z_erofs_init_zip_subsystem(void)
 148{
 149        int err = z_erofs_create_pcluster_pool();
 150
 151        if (err)
 152                return err;
 153        err = z_erofs_init_workqueue();
 154        if (err)
 155                z_erofs_destroy_pcluster_pool();
 156        return err;
 157}
 158
 159enum z_erofs_collectmode {
 160        COLLECT_SECONDARY,
 161        COLLECT_PRIMARY,
 162        /*
 163         * The current collection was the tail of an exist chain, in addition
 164         * that the previous processed chained collections are all decided to
 165         * be hooked up to it.
 166         * A new chain will be created for the remaining collections which are
 167         * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED,
 168         * the next collection cannot reuse the whole page safely in
 169         * the following scenario:
 170         *  ________________________________________________________________
 171         * |      tail (partial) page     |       head (partial) page       |
 172         * |   (belongs to the next cl)   |   (belongs to the current cl)   |
 173         * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
 174         */
 175        COLLECT_PRIMARY_HOOKED,
 176        /*
  177         * a weak form of COLLECT_PRIMARY_FOLLOWED; the difference is that it
  178         * could be dispatched into the bypass queue later due to up-to-date
  179         * managed pages. All related online pages cannot be reused for in-place
  180         * I/O (or pagevec) since it can be decoded directly without I/O submission.
 181         */
 182        COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
 183        /*
 184         * The current collection has been linked with the owned chain, and
 185         * could also be linked with the remaining collections, which means
  186         * if the processing page is the tail page of the collection, the
  187         * current collection can safely use the whole page (since the
  188         * previous collection is under control) for in-place I/O, as
 189         * illustrated below:
 190         *  ________________________________________________________________
 191         * |  tail (partial) page |          head (partial) page           |
 192         * |  (of the current cl) |      (of the previous collection)      |
 193         * |  PRIMARY_FOLLOWED or |                                        |
 194         * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
 195         *
 196         * [  (*) the above page can be used as inplace I/O.               ]
 197         */
 198        COLLECT_PRIMARY_FOLLOWED,
 199};
 200
 201struct z_erofs_collector {
 202        struct z_erofs_pagevec_ctor vector;
 203
 204        struct z_erofs_pcluster *pcl, *tailpcl;
 205        struct z_erofs_collection *cl;
 206        /* a pointer used to pick up inplace I/O pages */
 207        struct page **icpage_ptr;
 208        z_erofs_next_pcluster_t owned_head;
 209
 210        enum z_erofs_collectmode mode;
 211};
 212
 213struct z_erofs_decompress_frontend {
 214        struct inode *const inode;
 215
 216        struct z_erofs_collector clt;
 217        struct erofs_map_blocks map;
 218
 219        bool readahead;
 220        /* used for applying cache strategy on the fly */
 221        bool backmost;
 222        erofs_off_t headoffset;
 223};
 224
 225#define COLLECTOR_INIT() { \
 226        .owned_head = Z_EROFS_PCLUSTER_TAIL, \
 227        .mode = COLLECT_PRIMARY_FOLLOWED }
 228
 229#define DECOMPRESS_FRONTEND_INIT(__i) { \
 230        .inode = __i, .clt = COLLECTOR_INIT(), \
 231        .backmost = true, }
 232
 233static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
 234static DEFINE_MUTEX(z_pagemap_global_lock);
 235
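     /*
      * Try to fill compressed_pages[] with pages that already sit in the
      * managed cache (or, for TRYALLOC, with freshly preallocated pages).
      * Slots left empty are later served by in-place I/O or allocated at
      * submission time.
      */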
 236static void preload_compressed_pages(struct z_erofs_collector *clt,
 237                                     struct address_space *mc,
 238                                     enum z_erofs_cache_alloctype type,
 239                                     struct list_head *pagepool)
 240{
 241        struct z_erofs_pcluster *pcl = clt->pcl;
 242        bool standalone = true;
 243        gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
 244                        __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 245        struct page **pages;
 246        pgoff_t index;
 247
 248        if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
 249                return;
 250
 251        pages = pcl->compressed_pages;
 252        index = pcl->obj.index;
 253        for (; index < pcl->obj.index + pcl->pclusterpages; ++index, ++pages) {
 254                struct page *page;
 255                compressed_page_t t;
 256                struct page *newpage = NULL;
 257
 258                /* the compressed page was loaded before */
 259                if (READ_ONCE(*pages))
 260                        continue;
 261
 262                page = find_get_page(mc, index);
 263
 264                if (page) {
 265                        t = tag_compressed_page_justfound(page);
 266                } else {
  267                        /* I/O is needed, cannot decompress directly */
 268                        standalone = false;
 269                        switch (type) {
 270                        case DELAYEDALLOC:
 271                                t = tagptr_init(compressed_page_t,
 272                                                PAGE_UNALLOCATED);
 273                                break;
 274                        case TRYALLOC:
 275                                newpage = erofs_allocpage(pagepool, gfp);
 276                                if (!newpage)
 277                                        continue;
 278                                set_page_private(newpage,
 279                                                 Z_EROFS_PREALLOCATED_PAGE);
 280                                t = tag_compressed_page_justfound(newpage);
 281                                break;
 282                        default:        /* DONTALLOC */
 283                                continue;
 284                        }
 285                }
 286
 287                if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
 288                        continue;
 289
 290                if (page) {
 291                        put_page(page);
 292                } else if (newpage) {
 293                        set_page_private(newpage, 0);
 294                        list_add(&newpage->lru, pagepool);
 295                }
 296        }
 297
 298        /*
 299         * don't do inplace I/O if all compressed pages are available in
  300         * managed cache since the pcluster can be moved to the bypass queue instead.
 301         */
 302        if (standalone)
 303                clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
 304}
 305
 306/* called by erofs_shrinker to get rid of all compressed_pages */
 307int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 308                                       struct erofs_workgroup *grp)
 309{
 310        struct z_erofs_pcluster *const pcl =
 311                container_of(grp, struct z_erofs_pcluster, obj);
 312        int i;
 313
 314        /*
  315         * refcount of the workgroup is now frozen to 1,
 316         * therefore no need to worry about available decompression users.
 317         */
 318        for (i = 0; i < pcl->pclusterpages; ++i) {
 319                struct page *page = pcl->compressed_pages[i];
 320
 321                if (!page)
 322                        continue;
 323
 324                /* block other users from reclaiming or migrating the page */
 325                if (!trylock_page(page))
 326                        return -EBUSY;
 327
 328                if (!erofs_page_is_managed(sbi, page))
 329                        continue;
 330
 331                /* barrier is implied in the following 'unlock_page' */
 332                WRITE_ONCE(pcl->compressed_pages[i], NULL);
 333                detach_page_private(page);
 334                unlock_page(page);
 335        }
 336        return 0;
 337}
 338
 339int erofs_try_to_free_cached_page(struct page *page)
 340{
 341        struct z_erofs_pcluster *const pcl = (void *)page_private(page);
 342        int ret = 0;    /* 0 - busy */
 343
 344        if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
 345                unsigned int i;
 346
 347                for (i = 0; i < pcl->pclusterpages; ++i) {
 348                        if (pcl->compressed_pages[i] == page) {
 349                                WRITE_ONCE(pcl->compressed_pages[i], NULL);
 350                                ret = 1;
 351                                break;
 352                        }
 353                }
 354                erofs_workgroup_unfreeze(&pcl->obj, 1);
 355
 356                if (ret)
 357                        detach_page_private(page);
 358        }
 359        return ret;
 360}
 361
 362/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
 363static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
 364                                   struct page *page)
 365{
 366        struct z_erofs_pcluster *const pcl = clt->pcl;
 367
 368        while (clt->icpage_ptr > pcl->compressed_pages)
 369                if (!cmpxchg(--clt->icpage_ptr, NULL, page))
 370                        return true;
 371        return false;
 372}
 373
  374/* callers must hold the collection lock */
 375static int z_erofs_attach_page(struct z_erofs_collector *clt,
 376                               struct page *page,
 377                               enum z_erofs_page_type type)
 378{
 379        int ret;
 380
  381        /* give priority to in-place I/O */
 382        if (clt->mode >= COLLECT_PRIMARY &&
 383            type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
 384            z_erofs_try_inplace_io(clt, page))
 385                return 0;
 386
 387        ret = z_erofs_pagevec_enqueue(&clt->vector, page, type);
 388        clt->cl->vcnt += (unsigned int)ret;
 389
 390        return ret ? 0 : -EAGAIN;
 391}
 392
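     /*
      * try to attach the pcluster to the current submission chain; the chosen
      * collect mode (FOLLOWED, HOOKED or plain PRIMARY) is recorded in clt->mode.
      */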
 393static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
 394{
 395        struct z_erofs_pcluster *pcl = clt->pcl;
 396        z_erofs_next_pcluster_t *owned_head = &clt->owned_head;
 397
 398        /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
 399        if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
 400                    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
 401                *owned_head = &pcl->next;
 402                /* so we can attach this pcluster to our submission chain. */
 403                clt->mode = COLLECT_PRIMARY_FOLLOWED;
 404                return;
 405        }
 406
 407        /*
 408         * type 2, link to the end of an existing open chain, be careful
 409         * that its submission is controlled by the original attached chain.
 410         */
 411        if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
 412                    *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
 413                *owned_head = Z_EROFS_PCLUSTER_TAIL;
 414                clt->mode = COLLECT_PRIMARY_HOOKED;
 415                clt->tailpcl = NULL;
 416                return;
 417        }
 418        /* type 3, it belongs to a chain, but it isn't the end of the chain */
 419        clt->mode = COLLECT_PRIMARY;
 420}
 421
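     /*
      * check an existing pcluster against the current mapping result, update
      * its recorded length if needed and lock its primary collection.
      */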
 422static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
 423                                     struct inode *inode,
 424                                     struct erofs_map_blocks *map)
 425{
 426        struct z_erofs_pcluster *pcl = clt->pcl;
 427        struct z_erofs_collection *cl;
 428        unsigned int length;
 429
  430        /* to avoid an unexpected loop formed by corrupted images */
 431        if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
 432                DBG_BUGON(1);
 433                return -EFSCORRUPTED;
 434        }
 435
 436        cl = z_erofs_primarycollection(pcl);
 437        if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
 438                DBG_BUGON(1);
 439                return -EFSCORRUPTED;
 440        }
 441
 442        length = READ_ONCE(pcl->length);
 443        if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
 444                if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
 445                        DBG_BUGON(1);
 446                        return -EFSCORRUPTED;
 447                }
 448        } else {
 449                unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
 450
 451                if (map->m_flags & EROFS_MAP_FULL_MAPPED)
 452                        llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;
 453
 454                while (llen > length &&
 455                       length != cmpxchg_relaxed(&pcl->length, length, llen)) {
 456                        cpu_relax();
 457                        length = READ_ONCE(pcl->length);
 458                }
 459        }
 460        mutex_lock(&cl->lock);
 461        /* used to check tail merging loop due to corrupted images */
 462        if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 463                clt->tailpcl = pcl;
 464
 465        z_erofs_try_to_claim_pcluster(clt);
 466        clt->cl = cl;
 467        return 0;
 468}
 469
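     /*
      * allocate and insert a new pcluster for the mapped extent; -EEXIST is
      * returned if another pcluster was registered for it in the meantime.
      */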
 470static int z_erofs_register_collection(struct z_erofs_collector *clt,
 471                                       struct inode *inode,
 472                                       struct erofs_map_blocks *map)
 473{
 474        struct z_erofs_pcluster *pcl;
 475        struct z_erofs_collection *cl;
 476        struct erofs_workgroup *grp;
 477        int err;
 478
 479        /* no available pcluster, let's allocate one */
 480        pcl = z_erofs_alloc_pcluster(map->m_plen >> PAGE_SHIFT);
 481        if (IS_ERR(pcl))
 482                return PTR_ERR(pcl);
 483
 484        atomic_set(&pcl->obj.refcount, 1);
 485        pcl->obj.index = map->m_pa >> PAGE_SHIFT;
 486
 487        pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
 488                (map->m_flags & EROFS_MAP_FULL_MAPPED ?
 489                        Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
 490
 491        if (map->m_flags & EROFS_MAP_ZIPPED)
 492                pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4;
 493        else
 494                pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
 495
 496        /* new pclusters should be claimed as type 1, primary and followed */
 497        pcl->next = clt->owned_head;
 498        clt->mode = COLLECT_PRIMARY_FOLLOWED;
 499
 500        cl = z_erofs_primarycollection(pcl);
 501        cl->pageofs = map->m_la & ~PAGE_MASK;
 502
 503        /*
  504         * lock all primary followed works before they become visible to
  505         * others; mutex_trylock *never* fails for a new pcluster.
 506         */
 507        mutex_init(&cl->lock);
 508        DBG_BUGON(!mutex_trylock(&cl->lock));
 509
 510        grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj);
 511        if (IS_ERR(grp)) {
 512                err = PTR_ERR(grp);
 513                goto err_out;
 514        }
 515
 516        if (grp != &pcl->obj) {
 517                clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
 518                err = -EEXIST;
 519                goto err_out;
 520        }
 521        /* used to check tail merging loop due to corrupted images */
 522        if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 523                clt->tailpcl = pcl;
 524        clt->owned_head = &pcl->next;
 525        clt->pcl = pcl;
 526        clt->cl = cl;
 527        return 0;
 528
 529err_out:
 530        mutex_unlock(&cl->lock);
 531        z_erofs_free_pcluster(pcl);
 532        return err;
 533}
 534
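     /*
      * find or register the pcluster covering map->m_pa, then set up the
      * inline pagevec constructor and the in-place compressed page pointer.
      */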
 535static int z_erofs_collector_begin(struct z_erofs_collector *clt,
 536                                   struct inode *inode,
 537                                   struct erofs_map_blocks *map)
 538{
 539        struct erofs_workgroup *grp;
 540        int ret;
 541
 542        DBG_BUGON(clt->cl);
 543
  544        /* must be Z_EROFS_PCLUSTER_TAIL or point to a previous collection */
 545        DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
 546        DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
 547
 548        if (!PAGE_ALIGNED(map->m_pa)) {
 549                DBG_BUGON(1);
 550                return -EINVAL;
 551        }
 552
 553        grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
 554        if (grp) {
 555                clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
 556        } else {
 557                ret = z_erofs_register_collection(clt, inode, map);
 558
 559                if (!ret)
 560                        goto out;
 561                if (ret != -EEXIST)
 562                        return ret;
 563        }
 564
 565        ret = z_erofs_lookup_collection(clt, inode, map);
 566        if (ret) {
 567                erofs_workgroup_put(&clt->pcl->obj);
 568                return ret;
 569        }
 570
 571out:
 572        z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
 573                                  clt->cl->pagevec, clt->cl->vcnt);
 574
 575        /* since file-backed online pages are traversed in reverse order */
 576        clt->icpage_ptr = clt->pcl->compressed_pages + clt->pcl->pclusterpages;
 577        return 0;
 578}
 579
 580/*
  581 * keep in mind that no referenced pclusters will be freed;
  582 * unreferenced pclusters are only freed after an RCU grace period.
 583 */
 584static void z_erofs_rcu_callback(struct rcu_head *head)
 585{
 586        struct z_erofs_collection *const cl =
 587                container_of(head, struct z_erofs_collection, rcu);
 588
 589        z_erofs_free_pcluster(container_of(cl, struct z_erofs_pcluster,
 590                                           primary_collection));
 591}
 592
 593void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
 594{
 595        struct z_erofs_pcluster *const pcl =
 596                container_of(grp, struct z_erofs_pcluster, obj);
 597        struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
 598
 599        call_rcu(&cl->rcu, z_erofs_rcu_callback);
 600}
 601
 602static void z_erofs_collection_put(struct z_erofs_collection *cl)
 603{
 604        struct z_erofs_pcluster *const pcl =
 605                container_of(cl, struct z_erofs_pcluster, primary_collection);
 606
 607        erofs_workgroup_put(&pcl->obj);
 608}
 609
 610static bool z_erofs_collector_end(struct z_erofs_collector *clt)
 611{
 612        struct z_erofs_collection *cl = clt->cl;
 613
 614        if (!cl)
 615                return false;
 616
 617        z_erofs_pagevec_ctor_exit(&clt->vector, false);
 618        mutex_unlock(&cl->lock);
 619
 620        /*
  621         * once all pending pages are added, don't hold the pcluster
  622         * reference any longer if it isn't hosted by ourselves.
 623         */
 624        if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
 625                z_erofs_collection_put(cl);
 626
 627        clt->cl = NULL;
 628        return true;
 629}
 630
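     /* decide whether managed (cached) pages should be allocated per the cache strategy */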
 631static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
 632                                       unsigned int cachestrategy,
 633                                       erofs_off_t la)
 634{
 635        if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
 636                return false;
 637
 638        if (fe->backmost)
 639                return true;
 640
 641        return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
 642                la < fe->headoffset;
 643}
 644
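     /*
      * read one file page: walk the logical extents it covers (a page may
      * span several pclusters) and attach each part to its collection.
      */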
 645static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 646                                struct page *page, struct list_head *pagepool)
 647{
 648        struct inode *const inode = fe->inode;
 649        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 650        struct erofs_map_blocks *const map = &fe->map;
 651        struct z_erofs_collector *const clt = &fe->clt;
 652        const loff_t offset = page_offset(page);
 653        bool tight = true;
 654
 655        enum z_erofs_cache_alloctype cache_strategy;
 656        enum z_erofs_page_type page_type;
  657        unsigned int cur, end, split, index;
 658        int err = 0;
 659
 660        /* register locked file pages as online pages in pack */
 661        z_erofs_onlinepage_init(page);
 662
  663        split = 0;
 664        end = PAGE_SIZE;
 665repeat:
 666        cur = end - 1;
 667
 668        /* lucky, within the range of the current map_blocks */
 669        if (offset + cur >= map->m_la &&
 670            offset + cur < map->m_la + map->m_llen) {
 671                /* didn't get a valid collection previously (very rare) */
 672                if (!clt->cl)
 673                        goto restart_now;
 674                goto hitted;
 675        }
 676
  677        /* go on to the next map_blocks */
 678        erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
 679
 680        if (z_erofs_collector_end(clt))
 681                fe->backmost = false;
 682
 683        map->m_la = offset + cur;
 684        map->m_llen = 0;
 685        err = z_erofs_map_blocks_iter(inode, map, 0);
 686        if (err)
 687                goto err_out;
 688
 689restart_now:
 690        if (!(map->m_flags & EROFS_MAP_MAPPED))
 691                goto hitted;
 692
 693        err = z_erofs_collector_begin(clt, inode, map);
 694        if (err)
 695                goto err_out;
 696
 697        /* preload all compressed pages (maybe downgrade role if necessary) */
 698        if (should_alloc_managed_pages(fe, sbi->ctx.cache_strategy, map->m_la))
 699                cache_strategy = TRYALLOC;
 700        else
 701                cache_strategy = DONTALLOC;
 702
 703        preload_compressed_pages(clt, MNGD_MAPPING(sbi),
 704                                 cache_strategy, pagepool);
 705
 706hitted:
 707        /*
 708         * Ensure the current partial page belongs to this submit chain rather
  709         * than other concurrent submit chains or the noio (bypass) chain since
  710         * those chains are handled asynchronously, thus the page cannot be used
  711         * for in-place I/O or pagevec (should be processed in strict order).
 712         */
 713        tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
 714                  clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
 715
 716        cur = end - min_t(unsigned int, offset + end - map->m_la, end);
 717        if (!(map->m_flags & EROFS_MAP_MAPPED)) {
 718                zero_user_segment(page, cur, end);
 719                goto next_part;
 720        }
 721
 722        /* let's derive page type */
 723        page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
  724                (!split ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
 725                        (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
 726                                Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
 727
 728        if (cur)
 729                tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
 730
 731retry:
 732        err = z_erofs_attach_page(clt, page, page_type);
 733        /* should allocate an additional short-lived page for pagevec */
 734        if (err == -EAGAIN) {
 735                struct page *const newpage =
 736                                alloc_page(GFP_NOFS | __GFP_NOFAIL);
 737
 738                set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
 739                err = z_erofs_attach_page(clt, newpage,
 740                                          Z_EROFS_PAGE_TYPE_EXCLUSIVE);
 741                if (!err)
 742                        goto retry;
 743        }
 744
 745        if (err)
 746                goto err_out;
 747
 748        index = page->index - (map->m_la >> PAGE_SHIFT);
 749
 750        z_erofs_onlinepage_fixup(page, index, true);
 751
  752        /* bump up the number of split parts of a page */
  753        ++split;
 754        /* also update nr_pages */
 755        clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
 756next_part:
 757        /* can be used for verification */
 758        map->m_llen = offset + cur - map->m_la;
 759
 760        end = cur;
 761        if (end > 0)
 762                goto repeat;
 763
 764out:
 765        z_erofs_onlinepage_endio(page);
 766
  767        erofs_dbg("%s, finish page: %pK split: %u map->m_llen %llu",
  768                  __func__, page, split, map->m_llen);
 769        return err;
 770
 771        /* if some error occurred while processing this page */
 772err_out:
 773        SetPageError(page);
 774        goto out;
 775}
 776
 777static void z_erofs_decompressqueue_work(struct work_struct *work);
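     /* called as bios complete: wake up the synchronous waiter or kick off background decompression */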
 778static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 779                                       bool sync, int bios)
 780{
 781        struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
 782
 783        /* wake up the caller thread for sync decompression */
 784        if (sync) {
 785                unsigned long flags;
 786
 787                spin_lock_irqsave(&io->u.wait.lock, flags);
 788                if (!atomic_add_return(bios, &io->pending_bios))
 789                        wake_up_locked(&io->u.wait);
 790                spin_unlock_irqrestore(&io->u.wait.lock, flags);
 791                return;
 792        }
 793
 794        if (atomic_add_return(bios, &io->pending_bios))
 795                return;
 796        /* Use workqueue and sync decompression for atomic contexts only */
 797        if (in_atomic() || irqs_disabled()) {
 798                queue_work(z_erofs_workqueue, &io->u.work);
 799                sbi->ctx.readahead_sync_decompress = true;
 800                return;
 801        }
 802        z_erofs_decompressqueue_work(&io->u.work);
 803}
 804
 805static bool z_erofs_page_is_invalidated(struct page *page)
 806{
 807        return !page->mapping && !z_erofs_is_shortlived_page(page);
 808}
 809
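     /* bio completion handler: mark managed cache pages up-to-date and kick off decompression */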
 810static void z_erofs_decompressqueue_endio(struct bio *bio)
 811{
 812        tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
 813        struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
 814        blk_status_t err = bio->bi_status;
 815        struct bio_vec *bvec;
 816        struct bvec_iter_all iter_all;
 817
 818        bio_for_each_segment_all(bvec, bio, iter_all) {
 819                struct page *page = bvec->bv_page;
 820
 821                DBG_BUGON(PageUptodate(page));
 822                DBG_BUGON(z_erofs_page_is_invalidated(page));
 823
 824                if (err)
 825                        SetPageError(page);
 826
 827                if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
 828                        if (!err)
 829                                SetPageUptodate(page);
 830                        unlock_page(page);
 831                }
 832        }
 833        z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
 834        bio_put(bio);
 835}
 836
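     /* decompress one pcluster into the file pages gathered in its primary collection */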
 837static int z_erofs_decompress_pcluster(struct super_block *sb,
 838                                       struct z_erofs_pcluster *pcl,
 839                                       struct list_head *pagepool)
 840{
 841        struct erofs_sb_info *const sbi = EROFS_SB(sb);
 842        struct z_erofs_pagevec_ctor ctor;
 843        unsigned int i, inputsize, outputsize, llen, nr_pages;
 844        struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
 845        struct page **pages, **compressed_pages, *page;
 846
 847        enum z_erofs_page_type page_type;
 848        bool overlapped, partial;
 849        struct z_erofs_collection *cl;
 850        int err;
 851
 852        might_sleep();
 853        cl = z_erofs_primarycollection(pcl);
 854        DBG_BUGON(!READ_ONCE(cl->nr_pages));
 855
 856        mutex_lock(&cl->lock);
 857        nr_pages = cl->nr_pages;
 858
 859        if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
 860                pages = pages_onstack;
 861        } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
 862                   mutex_trylock(&z_pagemap_global_lock)) {
 863                pages = z_pagemap_global;
 864        } else {
 865                gfp_t gfp_flags = GFP_KERNEL;
 866
 867                if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
 868                        gfp_flags |= __GFP_NOFAIL;
 869
 870                pages = kvmalloc_array(nr_pages, sizeof(struct page *),
 871                                       gfp_flags);
 872
 873                /* fallback to global pagemap for the lowmem scenario */
 874                if (!pages) {
 875                        mutex_lock(&z_pagemap_global_lock);
 876                        pages = z_pagemap_global;
 877                }
 878        }
 879
 880        for (i = 0; i < nr_pages; ++i)
 881                pages[i] = NULL;
 882
 883        err = 0;
 884        z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
 885                                  cl->pagevec, 0);
 886
 887        for (i = 0; i < cl->vcnt; ++i) {
 888                unsigned int pagenr;
 889
 890                page = z_erofs_pagevec_dequeue(&ctor, &page_type);
 891
 892                /* all pages in pagevec ought to be valid */
 893                DBG_BUGON(!page);
 894                DBG_BUGON(z_erofs_page_is_invalidated(page));
 895
 896                if (z_erofs_put_shortlivedpage(pagepool, page))
 897                        continue;
 898
 899                if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
 900                        pagenr = 0;
 901                else
 902                        pagenr = z_erofs_onlinepage_index(page);
 903
 904                DBG_BUGON(pagenr >= nr_pages);
 905
 906                /*
  907                 * currently EROFS doesn't support multiref (dedup),
  908                 * so error out here if a multiref page is found.
 909                 */
 910                if (pages[pagenr]) {
 911                        DBG_BUGON(1);
 912                        SetPageError(pages[pagenr]);
 913                        z_erofs_onlinepage_endio(pages[pagenr]);
 914                        err = -EFSCORRUPTED;
 915                }
 916                pages[pagenr] = page;
 917        }
 918        z_erofs_pagevec_ctor_exit(&ctor, true);
 919
 920        overlapped = false;
 921        compressed_pages = pcl->compressed_pages;
 922
 923        for (i = 0; i < pcl->pclusterpages; ++i) {
 924                unsigned int pagenr;
 925
 926                page = compressed_pages[i];
 927
 928                /* all compressed pages ought to be valid */
 929                DBG_BUGON(!page);
 930                DBG_BUGON(z_erofs_page_is_invalidated(page));
 931
 932                if (!z_erofs_is_shortlived_page(page)) {
 933                        if (erofs_page_is_managed(sbi, page)) {
 934                                if (!PageUptodate(page))
 935                                        err = -EIO;
 936                                continue;
 937                        }
 938
 939                        /*
  940                         * only non-head pages can be selected
  941                         * for in-place decompression
 942                         */
 943                        pagenr = z_erofs_onlinepage_index(page);
 944
 945                        DBG_BUGON(pagenr >= nr_pages);
 946                        if (pages[pagenr]) {
 947                                DBG_BUGON(1);
 948                                SetPageError(pages[pagenr]);
 949                                z_erofs_onlinepage_endio(pages[pagenr]);
 950                                err = -EFSCORRUPTED;
 951                        }
 952                        pages[pagenr] = page;
 953
 954                        overlapped = true;
 955                }
 956
 957                /* PG_error needs checking for all non-managed pages */
 958                if (PageError(page)) {
 959                        DBG_BUGON(PageUptodate(page));
 960                        err = -EIO;
 961                }
 962        }
 963
 964        if (err)
 965                goto out;
 966
 967        llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
 968        if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
 969                outputsize = llen;
 970                partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
 971        } else {
 972                outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
 973                partial = true;
 974        }
 975
 976        inputsize = pcl->pclusterpages * PAGE_SIZE;
 977        err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
 978                                        .sb = sb,
 979                                        .in = compressed_pages,
 980                                        .out = pages,
 981                                        .pageofs_out = cl->pageofs,
 982                                        .inputsize = inputsize,
 983                                        .outputsize = outputsize,
 984                                        .alg = pcl->algorithmformat,
 985                                        .inplace_io = overlapped,
 986                                        .partial_decoding = partial
 987                                 }, pagepool);
 988
 989out:
  990        /* must handle all compressed pages before ending the file pages */
 991        for (i = 0; i < pcl->pclusterpages; ++i) {
 992                page = compressed_pages[i];
 993
 994                if (erofs_page_is_managed(sbi, page))
 995                        continue;
 996
 997                /* recycle all individual short-lived pages */
 998                (void)z_erofs_put_shortlivedpage(pagepool, page);
 999
1000                WRITE_ONCE(compressed_pages[i], NULL);
1001        }
1002
1003        for (i = 0; i < nr_pages; ++i) {
1004                page = pages[i];
1005                if (!page)
1006                        continue;
1007
1008                DBG_BUGON(z_erofs_page_is_invalidated(page));
1009
1010                /* recycle all individual short-lived pages */
1011                if (z_erofs_put_shortlivedpage(pagepool, page))
1012                        continue;
1013
1014                if (err < 0)
1015                        SetPageError(page);
1016
1017                z_erofs_onlinepage_endio(page);
1018        }
1019
1020        if (pages == z_pagemap_global)
1021                mutex_unlock(&z_pagemap_global_lock);
1022        else if (pages != pages_onstack)
1023                kvfree(pages);
1024
1025        cl->nr_pages = 0;
1026        cl->vcnt = 0;
1027
1028        /* all cl locks MUST be taken before the following line */
1029        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
1030
1031        /* all cl locks SHOULD be released right now */
1032        mutex_unlock(&cl->lock);
1033
1034        z_erofs_collection_put(cl);
1035        return err;
1036}
1037
1038static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
1039                                     struct list_head *pagepool)
1040{
1041        z_erofs_next_pcluster_t owned = io->head;
1042
1043        while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
1044                struct z_erofs_pcluster *pcl;
1045
 1046                /* impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
1047                DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
1048
 1050                /* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
1050                DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
1051
1052                pcl = container_of(owned, struct z_erofs_pcluster, next);
1053                owned = READ_ONCE(pcl->next);
1054
1055                z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
1056        }
1057}
1058
1059static void z_erofs_decompressqueue_work(struct work_struct *work)
1060{
1061        struct z_erofs_decompressqueue *bgq =
1062                container_of(work, struct z_erofs_decompressqueue, u.work);
1063        LIST_HEAD(pagepool);
1064
1065        DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1066        z_erofs_decompress_queue(bgq, &pagepool);
1067
1068        put_pages_list(&pagepool);
1069        kvfree(bgq);
1070}
1071
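     /*
      * pick up a page for compressed slot 'nr': reuse the cached or
      * preallocated one when possible, otherwise allocate a new page and
      * either add it to the managed cache or mark it short-lived.
      */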
1072static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
1073                                               unsigned int nr,
1074                                               struct list_head *pagepool,
1075                                               struct address_space *mc,
1076                                               gfp_t gfp)
1077{
1078        const pgoff_t index = pcl->obj.index;
1079        bool tocache = false;
1080
1081        struct address_space *mapping;
1082        struct page *oldpage, *page;
1083
1084        compressed_page_t t;
1085        int justfound;
1086
1087repeat:
1088        page = READ_ONCE(pcl->compressed_pages[nr]);
1089        oldpage = page;
1090
1091        if (!page)
1092                goto out_allocpage;
1093
1094        /*
1095         * the cached page has not been allocated and
 1096         * a placeholder is out there, prepare it now.
1097         */
1098        if (page == PAGE_UNALLOCATED) {
1099                tocache = true;
1100                goto out_allocpage;
1101        }
1102
1103        /* process the target tagged pointer */
1104        t = tagptr_init(compressed_page_t, page);
1105        justfound = tagptr_unfold_tags(t);
1106        page = tagptr_unfold_ptr(t);
1107
1108        /*
 1109         * preallocated cached pages, which were used to avoid direct reclaim;
 1110         * otherwise, the in-place I/O path will be taken instead.
1111         */
1112        if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
1113                WRITE_ONCE(pcl->compressed_pages[nr], page);
1114                set_page_private(page, 0);
1115                tocache = true;
1116                goto out_tocache;
1117        }
1118        mapping = READ_ONCE(page->mapping);
1119
1120        /*
 1121         * file-backed online pages in the pcluster are all locked steady,
1122         * therefore it is impossible for `mapping' to be NULL.
1123         */
1124        if (mapping && mapping != mc)
1125                /* ought to be unmanaged pages */
1126                goto out;
1127
1128        /* directly return for shortlived page as well */
1129        if (z_erofs_is_shortlived_page(page))
1130                goto out;
1131
1132        lock_page(page);
1133
1134        /* only true if page reclaim goes wrong, should never happen */
1135        DBG_BUGON(justfound && PagePrivate(page));
1136
 1137        /* the page is still in the managed cache */
1138        if (page->mapping == mc) {
1139                WRITE_ONCE(pcl->compressed_pages[nr], page);
1140
1141                ClearPageError(page);
1142                if (!PagePrivate(page)) {
1143                        /*
 1144                         * it's impossible to be !PagePrivate(page) under
 1145                         * the current restriction if the page is already
 1146                         * in compressed_pages[].
1147                         */
1148                        DBG_BUGON(!justfound);
1149
1150                        justfound = 0;
1151                        set_page_private(page, (unsigned long)pcl);
1152                        SetPagePrivate(page);
1153                }
1154
1155                /* no need to submit io if it is already up-to-date */
1156                if (PageUptodate(page)) {
1157                        unlock_page(page);
1158                        page = NULL;
1159                }
1160                goto out;
1161        }
1162
1163        /*
1164         * the managed page has been truncated, it's unsafe to
1165         * reuse this one, let's allocate a new cache-managed page.
1166         */
1167        DBG_BUGON(page->mapping);
1168        DBG_BUGON(!justfound);
1169
1170        tocache = true;
1171        unlock_page(page);
1172        put_page(page);
1173out_allocpage:
1174        page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
1175        if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
1176                list_add(&page->lru, pagepool);
1177                cond_resched();
1178                goto repeat;
1179        }
1180out_tocache:
1181        if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
 1182                /* turn it into a short-lived page if it cannot be cached (1 ref) */
1183                set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
1184                goto out;
1185        }
1186        attach_page_private(page, pcl);
1187        /* drop a refcount added by allocpage (then we have 2 refs here) */
1188        put_page(page);
1189
1190out:    /* the only exit (for tracing and debugging) */
1191        return page;
1192}
1193
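     /*
      * set up a decompression queue: a background one backed by the workqueue,
      * or a foreground (on-stack) one that the caller waits on directly.
      */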
1194static struct z_erofs_decompressqueue *
1195jobqueue_init(struct super_block *sb,
1196              struct z_erofs_decompressqueue *fgq, bool *fg)
1197{
1198        struct z_erofs_decompressqueue *q;
1199
1200        if (fg && !*fg) {
1201                q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
1202                if (!q) {
1203                        *fg = true;
1204                        goto fg_out;
1205                }
1206                INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
1207        } else {
1208fg_out:
1209                q = fgq;
1210                init_waitqueue_head(&fgq->u.wait);
1211                atomic_set(&fgq->pending_bios, 0);
1212        }
1213        q->sb = sb;
1214        q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
1215        return q;
1216}
1217
1218/* define decompression jobqueue types */
1219enum {
1220        JQ_BYPASS,
1221        JQ_SUBMIT,
1222        NR_JOBQUEUES,
1223};
1224
1225static void *jobqueueset_init(struct super_block *sb,
1226                              struct z_erofs_decompressqueue *q[],
1227                              struct z_erofs_decompressqueue *fgq, bool *fg)
1228{
1229        /*
 1230         * if managed cache is enabled, a bypass jobqueue is needed;
 1231         * no need to read from the device for pclusters in this queue.
1232         */
1233        q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
1234        q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
1235
1236        return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
1237}
1238
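     /* move a pcluster that needs no I/O from the submission chain to the bypass chain */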
1239static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
1240                                    z_erofs_next_pcluster_t qtail[],
1241                                    z_erofs_next_pcluster_t owned_head)
1242{
1243        z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
1244        z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
1245
1246        DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1247        if (owned_head == Z_EROFS_PCLUSTER_TAIL)
1248                owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
1249
1250        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
1251
1252        WRITE_ONCE(*submit_qtail, owned_head);
1253        WRITE_ONCE(*bypass_qtail, &pcl->next);
1254
1255        qtail[JQ_BYPASS] = &pcl->next;
1256}
1257
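     /*
      * walk the owned pcluster chain, build and submit read bios for pages
      * that need I/O; pclusters fully served by the managed cache are moved
      * to the bypass queue instead.
      */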
1258static void z_erofs_submit_queue(struct super_block *sb,
1259                                 struct z_erofs_decompress_frontend *f,
1260                                 struct list_head *pagepool,
1261                                 struct z_erofs_decompressqueue *fgq,
1262                                 bool *force_fg)
1263{
1264        struct erofs_sb_info *const sbi = EROFS_SB(sb);
1265        z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
1266        struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1267        void *bi_private;
1268        z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
1269        /* since bio will be NULL, no need to initialize last_index */
1270        pgoff_t last_index;
1271        unsigned int nr_bios = 0;
1272        struct bio *bio = NULL;
1273
1274        bi_private = jobqueueset_init(sb, q, fgq, force_fg);
1275        qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1276        qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1277
 1278        /* by default, all pclusters need I/O submission */
1279        q[JQ_SUBMIT]->head = owned_head;
1280
1281        do {
1282                struct z_erofs_pcluster *pcl;
1283                pgoff_t cur, end;
1284                unsigned int i = 0;
1285                bool bypass = true;
1286
 1287                /* impossible that 'owned_head' equals the following */
1288                DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1289                DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
1290
1291                pcl = container_of(owned_head, struct z_erofs_pcluster, next);
1292
1293                cur = pcl->obj.index;
1294                end = cur + pcl->pclusterpages;
1295
1296                /* close the main owned chain at first */
1297                owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
1298                                     Z_EROFS_PCLUSTER_TAIL_CLOSED);
1299
1300                do {
1301                        struct page *page;
1302
1303                        page = pickup_page_for_submission(pcl, i++, pagepool,
1304                                                          MNGD_MAPPING(sbi),
1305                                                          GFP_NOFS);
1306                        if (!page)
1307                                continue;
1308
1309                        if (bio && cur != last_index + 1) {
1310submit_bio_retry:
1311                                submit_bio(bio);
1312                                bio = NULL;
1313                        }
1314
1315                        if (!bio) {
1316                                bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
1317
1318                                bio->bi_end_io = z_erofs_decompressqueue_endio;
1319                                bio_set_dev(bio, sb->s_bdev);
1320                                bio->bi_iter.bi_sector = (sector_t)cur <<
1321                                        LOG_SECTORS_PER_BLOCK;
1322                                bio->bi_private = bi_private;
1323                                bio->bi_opf = REQ_OP_READ;
1324                                if (f->readahead)
1325                                        bio->bi_opf |= REQ_RAHEAD;
1326                                ++nr_bios;
1327                        }
1328
1329                        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
1330                                goto submit_bio_retry;
1331
1332                        last_index = cur;
1333                        bypass = false;
1334                } while (++cur < end);
1335
1336                if (!bypass)
1337                        qtail[JQ_SUBMIT] = &pcl->next;
1338                else
1339                        move_to_bypass_jobqueue(pcl, qtail, owned_head);
1340        } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
1341
1342        if (bio)
1343                submit_bio(bio);
1344
1345        /*
1346         * although background is preferred, no one is pending for submission.
 1347         * don't issue the workqueue for decompression but drop it directly instead.
1348         */
1349        if (!*force_fg && !nr_bios) {
1350                kvfree(q[JQ_SUBMIT]);
1351                return;
1352        }
1353        z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
1354}
1355
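     /*
      * submit all pending I/O and handle decompression: the bypass queue is
      * processed immediately here, while the submit queue is either waited on
      * and decompressed in this context (force_fg) or completed asynchronously.
      */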
1356static void z_erofs_runqueue(struct super_block *sb,
1357                             struct z_erofs_decompress_frontend *f,
1358                             struct list_head *pagepool, bool force_fg)
1359{
1360        struct z_erofs_decompressqueue io[NR_JOBQUEUES];
1361
1362        if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
1363                return;
1364        z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
1365
1366        /* handle bypass queue (no i/o pclusters) immediately */
1367        z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
1368
1369        if (!force_fg)
1370                return;
1371
1372        /* wait until all bios are completed */
1373        io_wait_event(io[JQ_SUBMIT].u.wait,
1374                      !atomic_read(&io[JQ_SUBMIT].pending_bios));
1375
1376        /* handle synchronous decompress queue in the caller context */
1377        z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
1378}
1379
1380static int z_erofs_readpage(struct file *file, struct page *page)
1381{
1382        struct inode *const inode = page->mapping->host;
1383        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1384        int err;
1385        LIST_HEAD(pagepool);
1386
1387        trace_erofs_readpage(page, false);
1388
1389        f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1390
1391        err = z_erofs_do_read_page(&f, page, &pagepool);
1392        (void)z_erofs_collector_end(&f.clt);
1393
 1394        /* if some compressed clusters are ready, submit them anyway */
1395        z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);
1396
1397        if (err)
1398                erofs_err(inode->i_sb, "failed to read, err [%d]", err);
1399
1400        if (f.map.mpage)
1401                put_page(f.map.mpage);
1402
1403        /* clean up the remaining free pages */
1404        put_pages_list(&pagepool);
1405        return err;
1406}
1407
1408static void z_erofs_readahead(struct readahead_control *rac)
1409{
1410        struct inode *const inode = rac->mapping->host;
1411        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1412
1413        unsigned int nr_pages = readahead_count(rac);
1414        bool sync = (sbi->ctx.readahead_sync_decompress &&
1415                        nr_pages <= sbi->ctx.max_sync_decompress_pages);
1416        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1417        struct page *page, *head = NULL;
1418        LIST_HEAD(pagepool);
1419
1420        trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
1421
1422        f.readahead = true;
1423        f.headoffset = readahead_pos(rac);
1424
1425        while ((page = readahead_page(rac))) {
1426                prefetchw(&page->flags);
1427
1428                /*
1429                 * A pure asynchronous readahead is indicated if
 1430                 * a PG_readahead marked page is hit first.
1431                 * Let's also do asynchronous decompression for this case.
1432                 */
1433                sync &= !(PageReadahead(page) && !head);
1434
1435                set_page_private(page, (unsigned long)head);
1436                head = page;
1437        }
1438
1439        while (head) {
1440                struct page *page = head;
1441                int err;
1442
1443                /* traversal in reverse order */
1444                head = (void *)page_private(page);
1445
1446                err = z_erofs_do_read_page(&f, page, &pagepool);
1447                if (err)
1448                        erofs_err(inode->i_sb,
1449                                  "readahead error at page %lu @ nid %llu",
1450                                  page->index, EROFS_I(inode)->nid);
1451                put_page(page);
1452        }
1453
1454        (void)z_erofs_collector_end(&f.clt);
1455
1456        z_erofs_runqueue(inode->i_sb, &f, &pagepool, sync);
1457
1458        if (f.map.mpage)
1459                put_page(f.map.mpage);
1460
1461        /* clean up the remaining free pages */
1462        put_pages_list(&pagepool);
1463}
1464
1465const struct address_space_operations z_erofs_aops = {
1466        .readpage = z_erofs_readpage,
1467        .readahead = z_erofs_readahead,
1468};
1469