/* linux/drivers/staging/lustre/lustre/llite/vvp_object.c */
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl_object implementation for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../../include/linux/libcfs/libcfs.h"

#include "../include/obd.h"

#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Object operations.
 *
 */

  52int vvp_object_invariant(const struct cl_object *obj)
  53{
  54        struct inode *inode  = vvp_object_inode(obj);
  55        struct ll_inode_info *lli = ll_i2info(inode);
  56
  57        return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
  58                lli->lli_clob == obj;
  59}
  60
  61static int vvp_object_print(const struct lu_env *env, void *cookie,
  62                            lu_printer_t p, const struct lu_object *o)
  63{
  64        struct vvp_object    *obj   = lu2vvp(o);
  65        struct inode     *inode = obj->vob_inode;
  66        struct ll_inode_info *lli;
  67
  68        (*p)(env, cookie, "(%d %d) inode: %p ",
  69             atomic_read(&obj->vob_transient_pages),
  70             atomic_read(&obj->vob_mmap_cnt), inode);
  71        if (inode) {
  72                lli = ll_i2info(inode);
  73                (*p)(env, cookie, "%lu/%u %o %u %d %p " DFID,
  74                     inode->i_ino, inode->i_generation, inode->i_mode,
  75                     inode->i_nlink, atomic_read(&inode->i_count),
  76                     lli->lli_clob, PFID(&lli->lli_fid));
  77        }
  78        return 0;
  79}
  80
  81static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
  82                        struct cl_attr *attr)
  83{
  84        struct inode *inode = vvp_object_inode(obj);
  85
  86        /*
  87         * lov overwrites most of these fields in
  88         * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
  89         * attributes are newer.
  90         */
  91
  92        attr->cat_size = i_size_read(inode);
  93        attr->cat_mtime = inode->i_mtime.tv_sec;
  94        attr->cat_atime = inode->i_atime.tv_sec;
  95        attr->cat_ctime = inode->i_ctime.tv_sec;
  96        attr->cat_blocks = inode->i_blocks;
  97        attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
  98        attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
  99        /* KMS is not known by this layer */
 100        return 0; /* layers below have to fill in the rest */
 101}
 102
/*
 * Push selected attributes from @attr into the VFS inode.
 *
 * Only the fields whose CAT_* bit is set in @valid are copied.  The
 * CAT_SIZE branch and the mark_inode_dirty() call are deliberately
 * compiled out with "if (0 && ...)" -- they are kept in the source as
 * documentation of what would otherwise be updated (see the original
 * "not currently necessary" note below); do not "fix" them.
 */
static int vvp_attr_update(const struct lu_env *env, struct cl_object *obj,
			   const struct cl_attr *attr, unsigned int valid)
{
	struct inode *inode = vvp_object_inode(obj);

	if (valid & CAT_UID)
		inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
	if (valid & CAT_GID)
		inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
	if (valid & CAT_ATIME)
		inode->i_atime.tv_sec = attr->cat_atime;
	if (valid & CAT_MTIME)
		inode->i_mtime.tv_sec = attr->cat_mtime;
	if (valid & CAT_CTIME)
		inode->i_ctime.tv_sec = attr->cat_ctime;
	/* intentionally disabled: size is not applied at this layer */
	if (0 && valid & CAT_SIZE)
		i_size_write(inode, attr->cat_size);
	/* not currently necessary */
	if (0 && valid & (CAT_UID | CAT_GID | CAT_SIZE))
		mark_inode_dirty(inode);
	return 0;
}
 125
 126static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
 127                        const struct cl_object_conf *conf)
 128{
 129        struct ll_inode_info *lli = ll_i2info(conf->coc_inode);
 130
 131        if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
 132                CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
 133                       PFID(&lli->lli_fid));
 134
 135                ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
 136
 137                /* Clean up page mmap for this inode.
 138                 * The reason for us to do this is that if the page has
 139                 * already been installed into memory space, the process
 140                 * can access it without interacting with lustre, so this
 141                 * page may be stale due to layout change, and the process
 142                 * will never be notified.
 143                 * This operation is expensive but mmap processes have to pay
 144                 * a price themselves.
 145                 */
 146                unmap_mapping_range(conf->coc_inode->i_mapping,
 147                                    0, OBD_OBJECT_EOF, 0);
 148        }
 149
 150        return 0;
 151}
 152
 153static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
 154{
 155        struct inode *inode = vvp_object_inode(obj);
 156        int rc;
 157
 158        rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
 159        if (rc < 0) {
 160                CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
 161                       PFID(lu_object_fid(&obj->co_lu)), rc);
 162                return rc;
 163        }
 164
 165        truncate_inode_pages(inode->i_mapping, 0);
 166        return 0;
 167}
 168
 169static int vvp_object_glimpse(const struct lu_env *env,
 170                              const struct cl_object *obj, struct ost_lvb *lvb)
 171{
 172        struct inode *inode = vvp_object_inode(obj);
 173
 174        lvb->lvb_mtime = LTIME_S(inode->i_mtime);
 175        lvb->lvb_atime = LTIME_S(inode->i_atime);
 176        lvb->lvb_ctime = LTIME_S(inode->i_ctime);
 177        /*
 178         * LU-417: Add dirty pages block count lest i_blocks reports 0, some
 179         * "cp" or "tar" on remote node may think it's a completely sparse file
 180         * and skip it.
 181         */
 182        if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
 183                lvb->lvb_blocks = dirty_cnt(inode);
 184        return 0;
 185}
 186
 187static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj,
 188                             struct cl_req_attr *attr)
 189{
 190        u64 valid_flags = OBD_MD_FLTYPE;
 191        struct inode *inode;
 192        struct obdo *oa;
 193
 194        oa = attr->cra_oa;
 195        inode = vvp_object_inode(obj);
 196
 197        if (attr->cra_type == CRT_WRITE)
 198                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
 199                               OBD_MD_FLUID | OBD_MD_FLGID;
 200        obdo_from_inode(oa, inode, valid_flags & attr->cra_flags);
 201        obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
 202        if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
 203                oa->o_parent_oid++;
 204        memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid, LUSTRE_JOBID_SIZE);
 205}
 206
 207static const struct cl_object_operations vvp_ops = {
 208        .coo_page_init = vvp_page_init,
 209        .coo_lock_init = vvp_lock_init,
 210        .coo_io_init   = vvp_io_init,
 211        .coo_attr_get  = vvp_attr_get,
 212        .coo_attr_update = vvp_attr_update,
 213        .coo_conf_set  = vvp_conf_set,
 214        .coo_prune     = vvp_prune,
 215        .coo_glimpse            = vvp_object_glimpse,
 216        .coo_req_attr_set       = vvp_req_attr_set
 217};
 218
 219static int vvp_object_init0(const struct lu_env *env,
 220                            struct vvp_object *vob,
 221                            const struct cl_object_conf *conf)
 222{
 223        vob->vob_inode = conf->coc_inode;
 224        atomic_set(&vob->vob_transient_pages, 0);
 225        cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
 226        return 0;
 227}
 228
 229static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
 230                           const struct lu_object_conf *conf)
 231{
 232        struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
 233        struct vvp_object *vob = lu2vvp(obj);
 234        struct lu_object  *below;
 235        struct lu_device  *under;
 236        int result;
 237
 238        under = &dev->vdv_next->cd_lu_dev;
 239        below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
 240        if (below) {
 241                const struct cl_object_conf *cconf;
 242
 243                cconf = lu2cl_conf(conf);
 244                lu_object_add(obj, below);
 245                result = vvp_object_init0(env, vob, cconf);
 246        } else {
 247                result = -ENOMEM;
 248        }
 249
 250        return result;
 251}
 252
 253static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
 254{
 255        struct vvp_object *vob = lu2vvp(obj);
 256
 257        lu_object_fini(obj);
 258        lu_object_header_fini(obj->lo_header);
 259        kmem_cache_free(vvp_object_kmem, vob);
 260}
 261
 262static const struct lu_object_operations vvp_lu_obj_ops = {
 263        .loo_object_init        = vvp_object_init,
 264        .loo_object_free        = vvp_object_free,
 265        .loo_object_print       = vvp_object_print,
 266};
 267
 268struct vvp_object *cl_inode2vvp(struct inode *inode)
 269{
 270        struct ll_inode_info *lli = ll_i2info(inode);
 271        struct cl_object     *obj = lli->lli_clob;
 272        struct lu_object     *lu;
 273
 274        lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
 275        LASSERT(lu);
 276        return lu2vvp(lu);
 277}
 278
 279struct lu_object *vvp_object_alloc(const struct lu_env *env,
 280                                   const struct lu_object_header *unused,
 281                                   struct lu_device *dev)
 282{
 283        struct vvp_object *vob;
 284        struct lu_object  *obj;
 285
 286        vob = kmem_cache_zalloc(vvp_object_kmem, GFP_NOFS);
 287        if (vob) {
 288                struct cl_object_header *hdr;
 289
 290                obj = &vob->vob_cl.co_lu;
 291                hdr = &vob->vob_header;
 292                cl_object_header_init(hdr);
 293                hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
 294
 295                lu_object_init(obj, &hdr->coh_lu, dev);
 296                lu_object_add_top(&hdr->coh_lu, obj);
 297
 298                vob->vob_cl.co_ops = &vvp_ops;
 299                obj->lo_ops = &vvp_lu_obj_ops;
 300        } else {
 301                obj = NULL;
 302        }
 303        return obj;
 304}
 305