/* linux/drivers/staging/lustre/lustre/llite/llite_close.c */
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  19 *
  20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  21 * CA 95054 USA or visit www.sun.com if you need additional information or
  22 * have any questions.
  23 *
  24 * GPL HEADER END
  25 */
  26/*
  27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  28 * Use is subject to license terms.
  29 *
  30 * Copyright (c) 2011, 2012, Intel Corporation.
  31 */
  32/*
  33 * This file is part of Lustre, http://www.lustre.org/
  34 * Lustre is a trademark of Sun Microsystems, Inc.
  35 *
  36 * lustre/llite/llite_close.c
  37 *
  38 * Lustre Lite routines to issue a secondary close after writeback
  39 */
  40
  41#include <linux/module.h>
  42
  43#define DEBUG_SUBSYSTEM S_LLITE
  44
  45#include "../include/lustre_lite.h"
  46#include "llite_internal.h"
  47
  48/** records that a write is in flight */
  49void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
  50{
  51        struct ll_inode_info *lli = ll_i2info(club->cob_inode);
  52
  53        spin_lock(&lli->lli_lock);
  54        lli->lli_flags |= LLIF_SOM_DIRTY;
  55        if (page && list_empty(&page->cpg_pending_linkage))
  56                list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
  57        spin_unlock(&lli->lli_lock);
  58}
  59
  60/** records that a write has completed */
  61void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
  62{
  63        struct ll_inode_info *lli = ll_i2info(club->cob_inode);
  64        int rc = 0;
  65
  66        spin_lock(&lli->lli_lock);
  67        if (page && !list_empty(&page->cpg_pending_linkage)) {
  68                list_del_init(&page->cpg_pending_linkage);
  69                rc = 1;
  70        }
  71        spin_unlock(&lli->lli_lock);
  72        if (rc)
  73                ll_queue_done_writing(club->cob_inode, 0);
  74}
  75
/** Queues DONE_WRITING if
 * - done writing is allowed;
 * - inode has no dirty pages;
 *
 * @flags is OR-ed into lli_flags under lli_lock before the check, so a
 * caller may pass LLIF_DONE_WRITING to both set and test the condition in
 * one shot.  Takes lcq_lock nested inside lli_lock.
 */
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);

	spin_lock(&lli->lli_lock);
	lli->lli_flags |= flags;

	if ((lli->lli_flags & LLIF_DONE_WRITING) &&
	    list_empty(&club->cob_pending_list)) {
		struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;

		/* A valid MDS size lock here is unexpected (NOTE(review):
		 * presumably only possible right after recovery) — warn. */
		if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
			CWARN("ino %lu/%u(flags %u) som valid it just after recovery\n",
			      inode->i_ino, inode->i_generation,
			      lli->lli_flags);
		/* DONE_WRITING is allowed and inode has no dirty page. */
		spin_lock(&lcq->lcq_lock);

		LASSERT(list_empty(&lli->lli_close_list));
		CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
		       inode->i_ino, inode->i_generation);
		list_add_tail(&lli->lli_close_list, &lcq->lcq_head);

		/* Avoid a concurrent insertion into the close thread queue:
		 * an inode is already in the close thread, open(), write(),
		 * close() happen, epoch is closed as the inode is marked as
		 * LLIF_EPOCH_PENDING. When pages are written inode should not
		 * be inserted into the queue again, clear this flag to avoid
		 * it.
		 */
		lli->lli_flags &= ~LLIF_DONE_WRITING;

		/* Hand the inode to the close thread draining lcq_head. */
		wake_up(&lcq->lcq_waitq);
		spin_unlock(&lcq->lcq_lock);
	}
	spin_unlock(&lli->lli_lock);
}
 118
 119/** Pack SOM attributes info @opdata for CLOSE, DONE_WRITING rpc. */
 120void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
 121{
 122        struct ll_inode_info *lli = ll_i2info(inode);
 123
 124        op_data->op_flags |= MF_SOM_CHANGE;
 125        /* Check if Size-on-MDS attributes are valid. */
 126        if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
 127                CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
 128                       inode->i_ino, inode->i_generation,
 129                       lli->lli_flags);
 130
 131        if (!cl_local_size(inode)) {
 132                /* Send Size-on-MDS Attributes if valid. */
 133                op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
 134                                ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
 135        }
 136}
 137
/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data.
 *
 * Three outcomes are possible:
 * - inode dirty, no epoch pending: stash *och in lli_pending_och, set
 *   LLIF_EPOCH_PENDING, pin the inode with igrab() and defer DONE_WRITE;
 * - inode dirty, DONE_WRITING requested: set LLIF_DONE_WRITING, pin the
 *   inode and retry once pages are flushed;
 * - inode clean: close the epoch (MF_EPOCH_CLOSE) and, when SOM attributes
 *   changed, pack them via ll_done_writing_attr().
 * The igrab() references are presumably balanced by the iput() in
 * ll_close_thread() — verify against the queueing path.
 */
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
		      struct obd_client_handle **och, unsigned long flags)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);

	spin_lock(&lli->lli_lock);
	if (!(list_empty(&club->cob_pending_list))) {
		if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
			LASSERT(*och);
			LASSERT(!lli->lli_pending_och);
			/* Inode is dirty and there is no pending write done
			 * request yet, DONE_WRITE is to be sent later.
			 */
			lli->lli_flags |= LLIF_EPOCH_PENDING;
			lli->lli_pending_och = *och;
			spin_unlock(&lli->lli_lock);

			/* Keep the inode alive until the deferred
			 * DONE_WRITE is issued. */
			inode = igrab(inode);
			LASSERT(inode);
			goto out;
		}
		if (flags & LLIF_DONE_WRITING) {
			/* Some pages are still dirty, it is early to send
			 * DONE_WRITE. Wait until all pages will be flushed
			 * and try DONE_WRITE again later.
			 */
			LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
			lli->lli_flags |= LLIF_DONE_WRITING;
			spin_unlock(&lli->lli_lock);

			inode = igrab(inode);
			LASSERT(inode);
			goto out;
		}
	}
	CDEBUG(D_INODE, "Epoch %llu closed on "DFID"\n",
	       ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
	op_data->op_flags |= MF_EPOCH_CLOSE;

	if (flags & LLIF_DONE_WRITING) {
		LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
		LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
		/* Reclaim the handle stashed by the deferred path above. */
		*och = lli->lli_pending_och;
		lli->lli_pending_och = NULL;
		lli->lli_flags &= ~LLIF_EPOCH_PENDING;
	} else {
		/* Pack Size-on-MDS inode attributes only if they changed. */
		if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
			spin_unlock(&lli->lli_lock);
			goto out;
		}

		/* There is a pending DONE_WRITE -- close epoch with no
		 * attribute change.
		 */
		if (lli->lli_flags & LLIF_EPOCH_PENDING) {
			spin_unlock(&lli->lli_lock);
			goto out;
		}
	}

	LASSERT(list_empty(&club->cob_pending_list));
	lli->lli_flags &= ~LLIF_SOM_DIRTY;
	spin_unlock(&lli->lli_lock);
	ll_done_writing_attr(inode, op_data);

out:
	return;
}
 209
 210/**
 211 * Cliens updates SOM attributes on MDS (including llog cookies):
 212 * obd_getattr with no lock and md_setattr.
 213 */
 214int ll_som_update(struct inode *inode, struct md_op_data *op_data)
 215{
 216        struct ll_inode_info *lli = ll_i2info(inode);
 217        struct ptlrpc_request *request = NULL;
 218        __u32 old_flags;
 219        struct obdo *oa;
 220        int rc;
 221
 222        LASSERT(op_data);
 223        if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
 224                CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
 225                       inode->i_ino, inode->i_generation,
 226                       lli->lli_flags);
 227
 228        oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
 229        if (!oa) {
 230                CERROR("can't allocate memory for Size-on-MDS update.\n");
 231                return -ENOMEM;
 232        }
 233
 234        old_flags = op_data->op_flags;
 235        op_data->op_flags = MF_SOM_CHANGE;
 236
 237        /* If inode is already in another epoch, skip getattr from OSTs. */
 238        if (lli->lli_ioepoch == op_data->op_ioepoch) {
 239                rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
 240                                      old_flags & MF_GETATTR_LOCK);
 241                if (rc) {
 242                        oa->o_valid = 0;
 243                        if (rc != -ENOENT)
 244                                CERROR("inode_getattr failed (%d): unable to send a Size-on-MDS attribute update for inode %lu/%u\n",
 245                                       rc, inode->i_ino,
 246                                       inode->i_generation);
 247                } else {
 248                        CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
 249                               PFID(&lli->lli_fid));
 250                }
 251                /* Install attributes into op_data. */
 252                md_from_obdo(op_data, oa, oa->o_valid);
 253        }
 254
 255        rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
 256                        NULL, 0, NULL, 0, &request, NULL);
 257        ptlrpc_req_finished(request);
 258
 259        kmem_cache_free(obdo_cachep, oa);
 260        return rc;
 261}
 262
 263/**
 264 * Closes the ioepoch and packs all the attributes into @op_data for
 265 * DONE_WRITING rpc.
 266 */
 267static void ll_prepare_done_writing(struct inode *inode,
 268                                    struct md_op_data *op_data,
 269                                    struct obd_client_handle **och)
 270{
 271        ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
 272        /* If there is no @och, we do not do D_W yet. */
 273        if (!*och)
 274                return;
 275
 276        ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
 277        ll_prep_md_op_data(op_data, inode, NULL, NULL,
 278                           0, 0, LUSTRE_OPC_ANY, NULL);
 279}
 280
 281/** Send a DONE_WRITING rpc. */
 282static void ll_done_writing(struct inode *inode)
 283{
 284        struct obd_client_handle *och = NULL;
 285        struct md_op_data *op_data;
 286        int rc;
 287
 288        LASSERT(exp_connect_som(ll_i2mdexp(inode)));
 289
 290        op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
 291        if (!op_data)
 292                return;
 293
 294        ll_prepare_done_writing(inode, op_data, &och);
 295        /* If there is no @och, we do not do D_W yet. */
 296        if (!och)
 297                goto out;
 298
 299        rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
 300        if (rc == -EAGAIN)
 301                /* MDS has instructed us to obtain Size-on-MDS attribute from
 302                 * OSTs and send setattr to back to MDS.
 303                 */
 304                rc = ll_som_update(inode, op_data);
 305        else if (rc)
 306                CERROR("inode %lu mdc done_writing failed: rc = %d\n",
 307                       inode->i_ino, rc);
 308out:
 309        ll_finish_md_op_data(op_data);
 310        if (och) {
 311                md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
 312                kfree(och);
 313        }
 314}
 315
 316static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
 317{
 318        struct ll_inode_info *lli = NULL;
 319
 320        spin_lock(&lcq->lcq_lock);
 321
 322        if (!list_empty(&lcq->lcq_head)) {
 323                lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
 324                                 lli_close_list);
 325                list_del_init(&lli->lli_close_list);
 326        } else if (atomic_read(&lcq->lcq_stop))
 327                lli = ERR_PTR(-EALREADY);
 328
 329        spin_unlock(&lcq->lcq_lock);
 330        return lli;
 331}
 332
/* Close-queue worker: drains lcq_head, issuing a DONE_WRITING rpc for each
 * queued inode, until ll_close_next_lli() returns an ERR_PTR (shutdown).
 * The two complete() calls pair with wait_for_completion() in
 * ll_close_thread_start() and ll_close_thread_shutdown() respectively. */
static int ll_close_thread(void *arg)
{
	struct ll_close_queue *lcq = arg;

	/* Tell ll_close_thread_start() we are up and running. */
	complete(&lcq->lcq_comp);

	while (1) {
		struct l_wait_info lwi = { 0 };
		struct ll_inode_info *lli;
		struct inode *inode;

		/* Sleep until an inode is queued or stop is requested; the
		 * assignment inside the condition re-runs on every wakeup. */
		l_wait_event_exclusive(lcq->lcq_waitq,
				       (lli = ll_close_next_lli(lcq)) != NULL,
				       &lwi);
		if (IS_ERR(lli))
			break;

		inode = ll_info2i(lli);
		CDEBUG(D_INFO, "done_writing for inode %lu/%u\n",
		       inode->i_ino, inode->i_generation);
		ll_done_writing(inode);
		/* Drop the reference taken (via igrab) when the inode was
		 * queued — see ll_ioepoch_close(). */
		iput(inode);
	}

	CDEBUG(D_INFO, "ll_close exiting\n");
	/* Tell ll_close_thread_shutdown() we have exited. */
	complete(&lcq->lcq_comp);
	return 0;
}
 361
 362int ll_close_thread_start(struct ll_close_queue **lcq_ret)
 363{
 364        struct ll_close_queue *lcq;
 365        struct task_struct *task;
 366
 367        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
 368                return -EINTR;
 369
 370        lcq = kzalloc(sizeof(*lcq), GFP_NOFS);
 371        if (!lcq)
 372                return -ENOMEM;
 373
 374        spin_lock_init(&lcq->lcq_lock);
 375        INIT_LIST_HEAD(&lcq->lcq_head);
 376        init_waitqueue_head(&lcq->lcq_waitq);
 377        init_completion(&lcq->lcq_comp);
 378
 379        task = kthread_run(ll_close_thread, lcq, "ll_close");
 380        if (IS_ERR(task)) {
 381                kfree(lcq);
 382                return PTR_ERR(task);
 383        }
 384
 385        wait_for_completion(&lcq->lcq_comp);
 386        *lcq_ret = lcq;
 387        return 0;
 388}
 389
/* Stop the close thread and free the queue.  The completion must be
 * re-initialized BEFORE waking the thread, so the wait below pairs with the
 * final complete() the thread issues on exit — do not reorder. */
void ll_close_thread_shutdown(struct ll_close_queue *lcq)
{
	init_completion(&lcq->lcq_comp);
	/* Makes ll_close_next_lli() return ERR_PTR(-EALREADY). */
	atomic_inc(&lcq->lcq_stop);
	wake_up(&lcq->lcq_waitq);
	wait_for_completion(&lcq->lcq_comp);
	kfree(lcq);
}
 398