/* linux/fs/erofs/zdata.h */
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 * Copyright (C) 2018 HUAWEI, Inc.
   4 *             https://www.huawei.com/
   5 * Created by Gao Xiang <gaoxiang25@huawei.com>
   6 */
   7#ifndef __EROFS_FS_ZDATA_H
   8#define __EROFS_FS_ZDATA_H
   9
  10#include "internal.h"
  11#include "zpvec.h"
  12
  13#define Z_EROFS_NR_INLINE_PAGEVECS      3
  14
  15/*
  16 * Structure fields follow one of the following exclusion rules.
  17 *
  18 * I: Modifiable by initialization/destruction paths and read-only
  19 *    for everyone else;
  20 *
  21 * L: Field should be protected by pageset lock;
  22 *
  23 * A: Field should be accessed / updated in atomic for parallelized code.
  24 */
/*
 * Per-extent decompression front-end state: the inline bootstrap pagevec
 * plus bookkeeping, tagged with the I/L/A exclusion rules described above.
 */
struct z_erofs_collection {
	/* lock held while touching the L-tagged fields below */
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};
  45
  46#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
  47#define Z_EROFS_PCLUSTER_LENGTH_BIT     1
  48
  49/*
  50 * let's leave a type here in case of introducing
  51 * another tagged pointer later.
  52 */
  53typedef void *z_erofs_next_pcluster_t;
  54
/*
 * A physical cluster: one unit of on-disk compressed data together with
 * the collection state used while decompressing it.  Embedded in a
 * managed erofs_workgroup (obj); freeable via the collection's rcu head.
 */
struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: compressed pages (including multi-usage pages) */
	struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];

	/* A: lower limit of decompressed length and if full length or not */
	unsigned int length;

	/* I: compression algorithm format */
	unsigned char algorithmformat;
	/* I: bit shift of physical cluster size */
	unsigned char clusterbits;
};
  73
  74#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
  75
  76/* let's avoid the valid 32-bit kernel addresses */
  77
  78/* the chained workgroup has't submitted io (still open) */
  79#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
  80/* the chained workgroup has already submitted io */
  81#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)
  82
  83#define Z_EROFS_PCLUSTER_NIL            (NULL)
  84
  85#define Z_EROFS_WORKGROUP_SIZE  sizeof(struct z_erofs_pcluster)
  86
/*
 * One submission queue of chained pclusters awaiting read I/O and
 * decompression; completed either synchronously (wait) or deferred to a
 * workqueue (work), depending on how the submitter set up the union.
 */
struct z_erofs_decompressqueue {
	struct super_block *sb;
	/* number of bios still in flight for this queue */
	atomic_t pending_bios;
	/* head of the chained pcluster list to process */
	z_erofs_next_pcluster_t head;

	union {
		wait_queue_head_t wait;
		struct work_struct work;
	} u;
};
  97
  98#define MNGD_MAPPING(sbi)       ((sbi)->managed_cache->i_mapping)
  99static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
 100                                         struct page *page)
 101{
 102        return page->mapping == MNGD_MAPPING(sbi);
 103}
 104
 105#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
 106#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
 107#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)
 108
 109/*
 110 * waiters (aka. ongoing_packs): # to unlock the page
 111 * sub-index: 0 - for partial page, >= 1 full page sub-index
 112 */
 113typedef atomic_t z_erofs_onlinepage_t;
 114
/*
 * Type-punning helper: view the unsigned long stored in
 * page_private(page) as a z_erofs_onlinepage_t (atomic_t) so the
 * online-page state can be read and updated atomically in place.
 */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};
 120
 121static inline unsigned int z_erofs_onlinepage_index(struct page *page)
 122{
 123        union z_erofs_onlinepage_converter u;
 124
 125        DBG_BUGON(!PagePrivate(page));
 126        u.v = &page_private(page);
 127
 128        return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
 129}
 130
 131static inline void z_erofs_onlinepage_init(struct page *page)
 132{
 133        union {
 134                z_erofs_onlinepage_t o;
 135                unsigned long v;
 136        /* keep from being unlocked in advance */
 137        } u = { .o = ATOMIC_INIT(1) };
 138
 139        set_page_private(page, u.v);
 140        smp_wmb();
 141        SetPagePrivate(page);
 142}
 143
 144static inline void z_erofs_onlinepage_fixup(struct page *page,
 145        uintptr_t index, bool down)
 146{
 147        union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
 148        int orig, orig_index, val;
 149
 150repeat:
 151        orig = atomic_read(u.o);
 152        orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
 153        if (orig_index) {
 154                if (!index)
 155                        return;
 156
 157                DBG_BUGON(orig_index != index);
 158        }
 159
 160        val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
 161                ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
 162        if (atomic_cmpxchg(u.o, orig, val) != orig)
 163                goto repeat;
 164}
 165
 166static inline void z_erofs_onlinepage_endio(struct page *page)
 167{
 168        union z_erofs_onlinepage_converter u;
 169        unsigned int v;
 170
 171        DBG_BUGON(!PagePrivate(page));
 172        u.v = &page_private(page);
 173
 174        v = atomic_dec_return(u.o);
 175        if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
 176                ClearPagePrivate(page);
 177                if (!PageError(page))
 178                        SetPageUptodate(page);
 179                unlock_page(page);
 180        }
 181        erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
 182}
 183
 184#define Z_EROFS_VMAP_ONSTACK_PAGES      \
 185        min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
 186#define Z_EROFS_VMAP_GLOBAL_PAGES       2048
 187
 188#endif
 189
 190