/* linux/drivers/staging/lustre/lustre/llite/vvp_object.c */
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.gnu.org/licenses/gpl-2.0.html
  19 *
  20 * GPL HEADER END
  21 */
  22/*
  23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  24 * Use is subject to license terms.
  25 *
  26 * Copyright (c) 2012, 2015, Intel Corporation.
  27 */
  28/*
  29 * This file is part of Lustre, http://www.lustre.org/
  30 * Lustre is a trademark of Sun Microsystems, Inc.
  31 *
  32 * cl_object implementation for VVP layer.
  33 *
  34 *   Author: Nikita Danilov <nikita.danilov@sun.com>
  35 */
  36
  37#define DEBUG_SUBSYSTEM S_LLITE
  38
  39#include "../../include/linux/libcfs/libcfs.h"
  40
  41#include "../include/obd.h"
  42
  43#include "llite_internal.h"
  44#include "vvp_internal.h"
  45
  46/*****************************************************************************
  47 *
  48 * Object operations.
  49 *
  50 */
  51
  52int vvp_object_invariant(const struct cl_object *obj)
  53{
  54        struct inode *inode  = vvp_object_inode(obj);
  55        struct ll_inode_info *lli = ll_i2info(inode);
  56
  57        return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
  58                lli->lli_clob == obj;
  59}
  60
  61static int vvp_object_print(const struct lu_env *env, void *cookie,
  62                            lu_printer_t p, const struct lu_object *o)
  63{
  64        struct vvp_object    *obj   = lu2vvp(o);
  65        struct inode     *inode = obj->vob_inode;
  66        struct ll_inode_info *lli;
  67
  68        (*p)(env, cookie, "(%s %d %d) inode: %p ",
  69             list_empty(&obj->vob_pending_list) ? "-" : "+",
  70             atomic_read(&obj->vob_transient_pages),
  71             atomic_read(&obj->vob_mmap_cnt), inode);
  72        if (inode) {
  73                lli = ll_i2info(inode);
  74                (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
  75                     inode->i_ino, inode->i_generation, inode->i_mode,
  76                     inode->i_nlink, atomic_read(&inode->i_count),
  77                     lli->lli_clob, PFID(&lli->lli_fid));
  78        }
  79        return 0;
  80}
  81
  82static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
  83                        struct cl_attr *attr)
  84{
  85        struct inode *inode = vvp_object_inode(obj);
  86
  87        /*
  88         * lov overwrites most of these fields in
  89         * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
  90         * attributes are newer.
  91         */
  92
  93        attr->cat_size = i_size_read(inode);
  94        attr->cat_mtime = inode->i_mtime.tv_sec;
  95        attr->cat_atime = inode->i_atime.tv_sec;
  96        attr->cat_ctime = inode->i_ctime.tv_sec;
  97        attr->cat_blocks = inode->i_blocks;
  98        attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
  99        attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
 100        /* KMS is not known by this layer */
 101        return 0; /* layers below have to fill in the rest */
 102}
 103
/*
 * Copy the attributes flagged in @valid from @attr back into the VFS inode.
 * Always returns 0.
 */
static int vvp_attr_update(const struct lu_env *env, struct cl_object *obj,
			   const struct cl_attr *attr, unsigned int valid)
{
	struct inode *inode = vvp_object_inode(obj);

	if (valid & CAT_UID)
		inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
	if (valid & CAT_GID)
		inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
	if (valid & CAT_ATIME)
		inode->i_atime.tv_sec = attr->cat_atime;
	if (valid & CAT_MTIME)
		inode->i_mtime.tv_sec = attr->cat_mtime;
	if (valid & CAT_CTIME)
		inode->i_ctime.tv_sec = attr->cat_ctime;
	/*
	 * The "if (0 && ...)" guards below deliberately compile out size
	 * propagation and inode dirtying; the code is kept as documentation
	 * of what would be done should it become necessary.
	 */
	if (0 && valid & CAT_SIZE)
		i_size_write(inode, attr->cat_size);
	/* not currently necessary */
	if (0 && valid & (CAT_UID | CAT_GID | CAT_SIZE))
		mark_inode_dirty(inode);
	return 0;
}
 126
 127static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
 128                        const struct cl_object_conf *conf)
 129{
 130        struct ll_inode_info *lli = ll_i2info(conf->coc_inode);
 131
 132        if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
 133                CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
 134                       PFID(&lli->lli_fid));
 135
 136                ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
 137
 138                /* Clean up page mmap for this inode.
 139                 * The reason for us to do this is that if the page has
 140                 * already been installed into memory space, the process
 141                 * can access it without interacting with lustre, so this
 142                 * page may be stale due to layout change, and the process
 143                 * will never be notified.
 144                 * This operation is expensive but mmap processes have to pay
 145                 * a price themselves.
 146                 */
 147                unmap_mapping_range(conf->coc_inode->i_mapping,
 148                                    0, OBD_OBJECT_EOF, 0);
 149
 150                return 0;
 151        }
 152
 153        if (conf->coc_opc != OBJECT_CONF_SET)
 154                return 0;
 155
 156        if (conf->u.coc_md && conf->u.coc_md->lsm) {
 157                CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
 158                       PFID(&lli->lli_fid), lli->lli_layout_gen,
 159                       conf->u.coc_md->lsm->lsm_layout_gen);
 160
 161                lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
 162                ll_layout_version_set(lli, conf->u.coc_md->lsm->lsm_layout_gen);
 163        } else {
 164                CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n",
 165                       PFID(&lli->lli_fid), lli->lli_layout_gen);
 166
 167                lli->lli_has_smd = false;
 168                ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY);
 169        }
 170        return 0;
 171}
 172
 173static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
 174{
 175        struct inode *inode = vvp_object_inode(obj);
 176        int rc;
 177
 178        rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
 179        if (rc < 0) {
 180                CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
 181                       PFID(lu_object_fid(&obj->co_lu)), rc);
 182                return rc;
 183        }
 184
 185        truncate_inode_pages(inode->i_mapping, 0);
 186        return 0;
 187}
 188
 189static int vvp_object_glimpse(const struct lu_env *env,
 190                              const struct cl_object *obj, struct ost_lvb *lvb)
 191{
 192        struct inode *inode = vvp_object_inode(obj);
 193
 194        lvb->lvb_mtime = LTIME_S(inode->i_mtime);
 195        lvb->lvb_atime = LTIME_S(inode->i_atime);
 196        lvb->lvb_ctime = LTIME_S(inode->i_ctime);
 197        /*
 198         * LU-417: Add dirty pages block count lest i_blocks reports 0, some
 199         * "cp" or "tar" on remote node may think it's a completely sparse file
 200         * and skip it.
 201         */
 202        if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
 203                lvb->lvb_blocks = dirty_cnt(inode);
 204        return 0;
 205}
 206
/* cl_object operations provided by the VVP layer. */
static const struct cl_object_operations vvp_ops = {
	.coo_page_init = vvp_page_init,
	.coo_lock_init = vvp_lock_init,
	.coo_io_init   = vvp_io_init,
	.coo_attr_get  = vvp_attr_get,
	.coo_attr_update = vvp_attr_update,
	.coo_conf_set  = vvp_conf_set,
	.coo_prune     = vvp_prune,
	.coo_glimpse   = vvp_object_glimpse
};
 217
 218static int vvp_object_init0(const struct lu_env *env,
 219                            struct vvp_object *vob,
 220                            const struct cl_object_conf *conf)
 221{
 222        vob->vob_inode = conf->coc_inode;
 223        atomic_set(&vob->vob_transient_pages, 0);
 224        cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
 225        return 0;
 226}
 227
 228static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
 229                           const struct lu_object_conf *conf)
 230{
 231        struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
 232        struct vvp_object *vob = lu2vvp(obj);
 233        struct lu_object  *below;
 234        struct lu_device  *under;
 235        int result;
 236
 237        under = &dev->vdv_next->cd_lu_dev;
 238        below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
 239        if (below) {
 240                const struct cl_object_conf *cconf;
 241
 242                cconf = lu2cl_conf(conf);
 243                INIT_LIST_HEAD(&vob->vob_pending_list);
 244                lu_object_add(obj, below);
 245                result = vvp_object_init0(env, vob, cconf);
 246        } else {
 247                result = -ENOMEM;
 248        }
 249
 250        return result;
 251}
 252
/*
 * Release a VVP object: finalize the lu_object, then its header, and return
 * the memory to the vvp_object slab.  NOTE(review): the vob pointer is
 * captured before lu_object_fini() so the slab free uses the original
 * allocation address; the fini-before-header-fini ordering appears
 * intentional -- confirm against lu_object lifecycle rules before reordering.
 */
static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
{
	struct vvp_object *vob = lu2vvp(obj);

	lu_object_fini(obj);
	lu_object_header_fini(obj->lo_header);
	kmem_cache_free(vvp_object_kmem, vob);
}
 261
/* lu_object operations for the VVP slice of a layered object. */
static const struct lu_object_operations vvp_lu_obj_ops = {
	.loo_object_init        = vvp_object_init,
	.loo_object_free        = vvp_object_free,
	.loo_object_print       = vvp_object_print,
};
 267
 268struct vvp_object *cl_inode2vvp(struct inode *inode)
 269{
 270        struct ll_inode_info *lli = ll_i2info(inode);
 271        struct cl_object     *obj = lli->lli_clob;
 272        struct lu_object     *lu;
 273
 274        lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
 275        LASSERT(lu);
 276        return lu2vvp(lu);
 277}
 278
 279struct lu_object *vvp_object_alloc(const struct lu_env *env,
 280                                   const struct lu_object_header *unused,
 281                                   struct lu_device *dev)
 282{
 283        struct vvp_object *vob;
 284        struct lu_object  *obj;
 285
 286        vob = kmem_cache_zalloc(vvp_object_kmem, GFP_NOFS);
 287        if (vob) {
 288                struct cl_object_header *hdr;
 289
 290                obj = &vob->vob_cl.co_lu;
 291                hdr = &vob->vob_header;
 292                cl_object_header_init(hdr);
 293                hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
 294
 295                lu_object_init(obj, &hdr->coh_lu, dev);
 296                lu_object_add_top(&hdr->coh_lu, obj);
 297
 298                vob->vob_cl.co_ops = &vvp_ops;
 299                obj->lo_ops = &vvp_lu_obj_ops;
 300        } else {
 301                obj = NULL;
 302        }
 303        return obj;
 304}
 305