linux/fs/cachefiles/cache.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* Manage high-level VFS aspects of a cache.
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/namei.h>
#include "internal.h"

/*
 * Bring a cache online.
 */
int cachefiles_add_cache(struct cachefiles_cache *cache)
{
        struct fscache_cache *cache_cookie;
        struct path path;
        struct kstatfs stats;
        struct dentry *graveyard, *cachedir, *root;
        const struct cred *saved_cred;
        int ret;

        _enter("");

        cache_cookie = fscache_acquire_cache(cache->tag);
        if (IS_ERR(cache_cookie))
                return PTR_ERR(cache_cookie);

        /* we want to work under the module's security ID */
        ret = cachefiles_get_security_ID(cache);
        if (ret < 0)
                goto error_getsec;

        cachefiles_begin_secure(cache, &saved_cred);

        /* look up the directory at the root of the cache */
        ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
        if (ret < 0)
                goto error_open_root;

        cache->mnt = path.mnt;
        root = path.dentry;

        ret = -EINVAL;
        if (is_idmapped_mnt(path.mnt)) {
                pr_warn("File cache on idmapped mounts not supported\n");
                goto error_unsupported;
        }

        /* Check features of the backing filesystem:
         * - Directories must support looking up and directory creation
         * - We create tmpfiles to handle invalidation
         * - We use xattrs to store metadata
         * - We need to be able to query the amount of space available
         * - We want to be able to sync the filesystem when stopping the cache
         * - We use DIO to/from pages, so the blocksize mustn't be too big.
         */
        ret = -EOPNOTSUPP;
        if (d_is_negative(root) ||
            !d_backing_inode(root)->i_op->lookup ||
            !d_backing_inode(root)->i_op->mkdir ||
            !d_backing_inode(root)->i_op->tmpfile ||
            !(d_backing_inode(root)->i_opflags & IOP_XATTR) ||
            !root->d_sb->s_op->statfs ||
            !root->d_sb->s_op->sync_fs ||
            root->d_sb->s_blocksize > PAGE_SIZE)
                goto error_unsupported;

        ret = -EROFS;
        if (sb_rdonly(root->d_sb))
                goto error_unsupported;

        /* determine the security of the on-disk cache as this governs the
         * security ID of the files we create */
        ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
        if (ret < 0)
                goto error_unsupported;

        /* get the cache size and blocksize */
        ret = vfs_statfs(&path, &stats);
        if (ret < 0)
                goto error_unsupported;

        ret = -ERANGE;
        if (stats.f_bsize <= 0)
                goto error_unsupported;

        ret = -EOPNOTSUPP;
        if (stats.f_bsize > PAGE_SIZE)
                goto error_unsupported;

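        /* Record the block size and its log2 so that byte counts can be
         * converted to block counts elsewhere in cachefiles. */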
        cache->bsize = stats.f_bsize;
        cache->bshift = ilog2(stats.f_bsize);

        _debug("blksize %u (shift %u)",
               cache->bsize, cache->bshift);

        _debug("size %llu, avail %llu",
               (unsigned long long) stats.f_blocks,
               (unsigned long long) stats.f_bavail);

        /* set up caching limits */
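        /* f_files and f_blocks are divided by 100 so that multiplying by the
         * configured percentages (frun/fcull/fstop and brun/bcull/bstop)
         * yields absolute file and block counts. */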
        do_div(stats.f_files, 100);
        cache->fstop = stats.f_files * cache->fstop_percent;
        cache->fcull = stats.f_files * cache->fcull_percent;
        cache->frun  = stats.f_files * cache->frun_percent;

        _debug("limits {%llu,%llu,%llu} files",
               (unsigned long long) cache->frun,
               (unsigned long long) cache->fcull,
               (unsigned long long) cache->fstop);

        do_div(stats.f_blocks, 100);
        cache->bstop = stats.f_blocks * cache->bstop_percent;
        cache->bcull = stats.f_blocks * cache->bcull_percent;
        cache->brun  = stats.f_blocks * cache->brun_percent;

        _debug("limits {%llu,%llu,%llu} blocks",
               (unsigned long long) cache->brun,
               (unsigned long long) cache->bcull,
               (unsigned long long) cache->bstop);

        /* get the cache directory and check its type */
        cachedir = cachefiles_get_directory(cache, root, "cache", NULL);
        if (IS_ERR(cachedir)) {
                ret = PTR_ERR(cachedir);
                goto error_unsupported;
        }

        cache->store = cachedir;

        /* get the graveyard directory */
        graveyard = cachefiles_get_directory(cache, root, "graveyard", NULL);
        if (IS_ERR(graveyard)) {
                ret = PTR_ERR(graveyard);
                goto error_unsupported;
        }

        cache->graveyard = graveyard;
        cache->cache = cache_cookie;

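        /* Hand the cache and its operations table over to fscache to bring
         * the cache online. */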
        ret = fscache_add_cache(cache_cookie, &cachefiles_cache_ops, cache);
        if (ret < 0)
                goto error_add_cache;

        /* done */
        set_bit(CACHEFILES_READY, &cache->flags);
        dput(root);

        pr_info("File cache on %s registered\n", cache_cookie->name);

        /* check how much space the cache has */
        cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
        cachefiles_end_secure(cache, saved_cred);
        _leave(" = 0 [%px]", cache->cache);
        return 0;

error_add_cache:
        cachefiles_put_directory(cache->graveyard);
        cache->graveyard = NULL;
error_unsupported:
        cachefiles_put_directory(cache->store);
        cache->store = NULL;
        mntput(cache->mnt);
        cache->mnt = NULL;
        dput(root);
error_open_root:
        cachefiles_end_secure(cache, saved_cred);
error_getsec:
        fscache_relinquish_cache(cache_cookie);
        cache->cache = NULL;
        pr_err("Failed to register: %d\n", ret);
        return ret;
}

/*
 * See if we have space for a number of pages and/or a number of files in the
 * cache
 */
int cachefiles_has_space(struct cachefiles_cache *cache,
                         unsigned fnr, unsigned bnr,
                         enum cachefiles_has_space_for reason)
{
        struct kstatfs stats;
        u64 b_avail, b_writing;
        int ret;

        struct path path = {
                .mnt    = cache->mnt,
                .dentry = cache->mnt->mnt_root,
        };

        //_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
        //       (unsigned long long) cache->frun,
        //       (unsigned long long) cache->fcull,
        //       (unsigned long long) cache->fstop,
        //       (unsigned long long) cache->brun,
        //       (unsigned long long) cache->bcull,
        //       (unsigned long long) cache->bstop,
        //       fnr, bnr);

        /* find out how many pages of blockdev are available */
        memset(&stats, 0, sizeof(stats));

        ret = vfs_statfs(&path, &stats);
        if (ret < 0) {
                trace_cachefiles_vfs_error(NULL, d_inode(path.dentry), ret,
                                           cachefiles_trace_statfs_error);
                if (ret == -EIO)
                        cachefiles_io_error(cache, "statfs failed");
                _leave(" = %d", ret);
                return ret;
        }

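        /* Deduct the blocks already committed to in-flight writes
         * (b_writing) so that space about to be consumed isn't counted as
         * still being available. */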
        b_avail = stats.f_bavail;
        b_writing = atomic_long_read(&cache->b_writing);
        if (b_avail > b_writing)
                b_avail -= b_writing;
        else
                b_avail = 0;

        //_debug("avail %llu,%llu",
        //       (unsigned long long)stats.f_ffree,
        //       (unsigned long long)b_avail);

        /* see if there is sufficient space */
        if (stats.f_ffree > fnr)
                stats.f_ffree -= fnr;
        else
                stats.f_ffree = 0;

        if (b_avail > bnr)
                b_avail -= bnr;
        else
                b_avail = 0;

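        /* Compare against the three tiers of limits: below the stop limits
         * the request is refused and culling is begun; below the cull limits
         * the request is granted but culling is begun; culling may cease once
         * free space rises back to the run limits. */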
        ret = -ENOBUFS;
        if (stats.f_ffree < cache->fstop ||
            b_avail < cache->bstop)
                goto stop_and_begin_cull;

        ret = 0;
        if (stats.f_ffree < cache->fcull ||
            b_avail < cache->bcull)
                goto begin_cull;

        if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
            stats.f_ffree >= cache->frun &&
            b_avail >= cache->brun &&
            test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
            ) {
                _debug("cease culling");
                cachefiles_state_changed(cache);
        }

        //_leave(" = 0");
        return 0;

stop_and_begin_cull:
        switch (reason) {
        case cachefiles_has_space_for_write:
                fscache_count_no_write_space();
                break;
        case cachefiles_has_space_for_create:
                fscache_count_no_create_space();
                break;
        default:
                break;
        }
begin_cull:
        if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
                _debug("### CULL CACHE ###");
                cachefiles_state_changed(cache);
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * Mark all the objects as being out of service and queue them all for cleanup.
 */
static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
{
        struct cachefiles_object *object;
        unsigned int count = 0;

        _enter("");

        spin_lock(&cache->object_list_lock);

        while (!list_empty(&cache->object_list)) {
                object = list_first_entry(&cache->object_list,
                                          struct cachefiles_object, cache_link);
                cachefiles_see_object(object, cachefiles_obj_see_withdrawal);
                list_del_init(&object->cache_link);
                fscache_withdraw_cookie(object->cookie);
                count++;
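                /* Drop the lock every 64 objects to give other users of the
                 * list a chance and to allow rescheduling. */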
                if ((count & 63) == 0) {
                        spin_unlock(&cache->object_list_lock);
                        cond_resched();
                        spin_lock(&cache->object_list_lock);
                }
        }

        spin_unlock(&cache->object_list_lock);
        _leave(" [%u objs]", count);
}

/*
 * Withdraw volumes.
 */
static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
{
        _enter("");

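        /* Detach the volumes from the list one at a time under the lock,
         * then withdraw each one with the lock dropped. */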
        for (;;) {
                struct cachefiles_volume *volume = NULL;

                spin_lock(&cache->object_list_lock);
                if (!list_empty(&cache->volumes)) {
                        volume = list_first_entry(&cache->volumes,
                                                  struct cachefiles_volume, cache_link);
                        list_del_init(&volume->cache_link);
                }
                spin_unlock(&cache->object_list_lock);
                if (!volume)
                        break;

                cachefiles_withdraw_volume(volume);
        }

        _leave("");
}

/*
 * Sync a cache to backing disk.
 */
static void cachefiles_sync_cache(struct cachefiles_cache *cache)
{
        const struct cred *saved_cred;
        int ret;

        _enter("%s", cache->cache->name);

        /* make sure all pages pinned by operations on behalf of the netfs are
         * written to disc */
        cachefiles_begin_secure(cache, &saved_cred);
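        /* sync_filesystem() must be called with s_umount held. */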
        down_read(&cache->mnt->mnt_sb->s_umount);
        ret = sync_filesystem(cache->mnt->mnt_sb);
        up_read(&cache->mnt->mnt_sb->s_umount);
        cachefiles_end_secure(cache, saved_cred);

        if (ret == -EIO)
                cachefiles_io_error(cache,
                                    "Attempt to sync backing fs superblock returned error %d",
                                    ret);
}

/*
 * Withdraw cache objects.
 */
void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
{
        struct fscache_cache *fscache = cache->cache;

        pr_info("File cache on %s unregistering\n", fscache->name);

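        /* Tell fscache that the cache is going away so that it stops
         * directing new activity at it. */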
        fscache_withdraw_cache(fscache);

        /* we now have to destroy all the active objects pertaining to this
         * cache - which we do by passing them off to the thread pool to be
         * disposed of */
        cachefiles_withdraw_objects(cache);
        fscache_wait_for_objects(fscache);

        cachefiles_withdraw_volumes(cache);
        cachefiles_sync_cache(cache);
        cache->cache = NULL;
        fscache_relinquish_cache(fscache);
}
