linux/drivers/staging/lustre/lustre/llite/vvp_internal.h
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2013, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Internal definitions for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#ifndef VVP_INTERNAL_H
#define VVP_INTERNAL_H

#include "../include/lustre/lustre_idl.h"
#include "../include/cl_object.h"

enum obd_notify_event;
struct inode;
struct lov_stripe_md;
struct lustre_md;
struct obd_capa;
struct obd_device;
struct obd_export;
struct page;

/**
 * IO state private to VVP layer.
 */
struct vvp_io {
        /** super class */
        struct cl_io_slice     vui_cl;
        struct cl_io_lock_link vui_link;
        /**
         * I/O vector describing the user buffers to or from which the
         * read/write transfers data.
         */
        struct iov_iter *vui_iter;
        /**
         * Total size of the remaining I/O.
         */
        size_t vui_tot_count;

        union {
                struct vvp_fault_io {
                        /**
                         * Inode modification time that is checked across the
                         * DLM lock request.
                         */
                        time64_t            ft_mtime;
                        struct vm_area_struct *ft_vma;
                        /**
                         * locked page returned from vvp_io
                         */
                        struct page         *ft_vmpage;
                        /**
                         * kernel fault info
                         */
                        struct vm_fault *ft_vmf;
                        /**
                         * bitflags used by the fault API for its return code.
                         */
                        unsigned int    ft_flags;
                        /**
                         * true if ft_flags holds a return value from
                         * filemap_fault()
                         */
                        bool            ft_flags_valid;
                } fault;
                struct {
                        struct cl_page_list vui_queue;
                        unsigned long vui_written;
                        int vui_from;
                        int vui_to;
                } write;
        } u;

        /**
         * Layout version when this IO was initialized
         */
        __u32                   vui_layout_gen;
        /**
         * File descriptor against which IO is done.
         */
        struct ll_file_data     *vui_fd;
        struct kiocb            *vui_iocb;

        /* Readahead state. */
        pgoff_t vui_ra_start;
        pgoff_t vui_ra_count;
        /* Set when vui_ra_{start,count} have been initialized. */
        bool            vui_ra_valid;
};

extern struct lu_device_type vvp_device_type;

extern struct lu_context_key vvp_session_key;
extern struct lu_context_key vvp_thread_key;

extern struct kmem_cache *vvp_lock_kmem;
extern struct kmem_cache *vvp_object_kmem;
extern struct kmem_cache *vvp_req_kmem;

struct vvp_thread_info {
        struct cl_lock          vti_lock;
        struct cl_lock_descr    vti_descr;
        struct cl_io            vti_io;
        struct cl_attr          vti_attr;
};

static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
{
        struct vvp_thread_info      *vti;

        vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
        LASSERT(vti);

        return vti;
}

static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
{
        struct cl_lock *lock = &vvp_env_info(env)->vti_lock;

        memset(lock, 0, sizeof(*lock));
        return lock;
}

static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
{
        struct cl_attr *attr = &vvp_env_info(env)->vti_attr;

        memset(attr, 0, sizeof(*attr));

        return attr;
}

static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
{
        struct cl_io *io = &vvp_env_info(env)->vti_io;

        memset(io, 0, sizeof(*io));

        return io;
}

struct vvp_session {
        struct vvp_io cs_ios;
};

static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
{
        struct vvp_session *ses;

        ses = lu_context_key_get(env->le_ses, &vvp_session_key);
        LASSERT(ses);

        return ses;
}

static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
{
        return &vvp_env_session(env)->cs_ios;
}
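
/*
 * Illustrative sketch only (the helper name below is hypothetical, not part
 * of the original interface): how a caller would typically combine the
 * accessors above.  vvp_env_thread_io() returns per-thread scratch storage
 * that it zeroes on every call, while vvp_env_io() returns the per-session
 * vvp_io state, which is not cleared here.  Neither pointer should be cached
 * beyond the lifetime of @env.
 */
static inline struct cl_io *vvp_example_env_setup(const struct lu_env *env)
{
        /* Per-thread cl_io scratch buffer, zeroed by vvp_env_thread_io(). */
        struct cl_io *io = vvp_env_thread_io(env);
        /* Per-session vvp_io state associated with this env. */
        struct vvp_io *vio = vvp_env_io(env);

        /* Reset only the fields this example cares about before reuse. */
        vio->vui_tot_count = 0;
        vio->vui_ra_valid = false;

        return io;
}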

/**
 * VVP-private object state.
 */
struct vvp_object {
        struct cl_object_header vob_header;
        struct cl_object        vob_cl;
        struct inode           *vob_inode;

        /**
         * A list of dirty pages pending IO in the cache. Used by
         * SOM. Protected by ll_inode_info::lli_lock.
         *
         * \see vvp_page::vpg_pending_linkage
         */
        struct list_head        vob_pending_list;

        /**
         * Number of transient pages.  This is no longer protected by i_sem,
         * and needs to be atomic.  This is not actually used for anything,
         * and can probably be removed.
         */
        atomic_t                vob_transient_pages;

        /**
         * Number of outstanding mmaps on this file.
         *
         * \see ll_vm_open(), ll_vm_close().
         */
        atomic_t                vob_mmap_cnt;

        /**
         * Various flags.
         *
         * vob_discard_page_warned
         *     When pages belonging to this object are discarded because the
         *     client is being evicted, some debug information is printed.
         *     This flag is set while the first discarded page is processed,
         *     so that the debug message is not repeated for every further
         *     discarded page.
         *
         * \see ll_dirty_page_discard_warn.
         */
        unsigned int            vob_discard_page_warned:1;
};

/**
 * VVP-private page state.
 */
struct vvp_page {
        struct cl_page_slice vpg_cl;
        unsigned int    vpg_defer_uptodate:1,
                        vpg_ra_used:1,
                        vpg_write_queued:1;
        /**
         * Non-empty iff this page is already counted in
         * vvp_object::vob_pending_list. This list is only used as a flag,
         * that is, never iterated through, only checked for list_empty(), but
         * having a list is useful for debugging.
         */
        struct list_head           vpg_pending_linkage;
        /** VM page */
        struct page       *vpg_page;
};

static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
{
        return container_of(slice, struct vvp_page, vpg_cl);
}

static inline pgoff_t vvp_index(struct vvp_page *vvp)
{
        return vvp->vpg_cl.cpl_index;
}

struct vvp_device {
        struct cl_device    vdv_cl;
        struct super_block *vdv_sb;
        struct cl_device   *vdv_next;
};

struct vvp_lock {
        struct cl_lock_slice vlk_cl;
};

struct vvp_req {
        struct cl_req_slice  vrq_cl;
};

void *ccc_key_init(const struct lu_context *ctx,
                   struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx,
                  struct lu_context_key *key, void *data);

void ccc_umount(const struct lu_env *env, struct cl_device *dev);

static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
{
        return &vdv->vdv_cl.cd_lu_dev;
}

static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
{
        return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev);
}

static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
{
        return container_of0(d, struct vvp_device, vdv_cl);
}

static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
{
        return container_of0(obj, struct vvp_object, vob_cl);
}

static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
{
        return container_of0(obj, struct vvp_object, vob_cl.co_lu);
}

static inline struct inode *vvp_object_inode(const struct cl_object *obj)
{
        return cl2vvp(obj)->vob_inode;
}

int vvp_object_invariant(const struct cl_object *obj);
struct vvp_object *cl_inode2vvp(struct inode *inode);

static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
{
        return cl2vvp_page(slice)->vpg_page;
}

static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
{
        return container_of(slice, struct vvp_lock, vlk_cl);
}

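/*
 * Invariant check helper.  In this build the macro below only "uses" its
 * arguments through sizeof, so they are type-checked and do not trigger
 * unused-variable warnings; it evaluates nothing at run time.
 */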
# define CLOBINVRNT(env, clob, expr)                                    \
        ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))

/**
 * New interfaces to get and put a lov_stripe_md from the lov layer. This
 * violates layering because lov_stripe_md is supposed to be private data
 * of the lov layer.
 *
 * NB: if you find you have to use these interfaces for your new code, please
 * think about it again. These interfaces may be removed in the future for
 * better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);

struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
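
/*
 * Illustrative sketch only (the helper below is hypothetical and not part of
 * this header's interface): the expected get/use/put pairing for the
 * accessors declared above.  A layout returned by ccc_inode_lsm_get() is
 * released with ccc_inode_lsm_put() once the caller is done with it.
 */
static inline bool vvp_example_inode_is_striped(struct inode *inode)
{
        struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);
        bool striped = lsm != NULL;

        /* Every successful get must be balanced by a put. */
        if (lsm)
                ccc_inode_lsm_put(inode, lsm);

        return striped;
}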

int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_io *io);
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, pgoff_t index);
int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
                 struct cl_req *req);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
                                   const struct lu_object_header *hdr,
                                   struct lu_device *dev);

int vvp_global_init(void);
void vvp_global_fini(void);

extern const struct file_operations vvp_dump_pgcache_file_ops;

#endif /* VVP_INTERNAL_H */