/* linux/drivers/staging/lustre/lustre/llite/llite_close.c */
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  19 *
  20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  21 * CA 95054 USA or visit www.sun.com if you need additional information or
  22 * have any questions.
  23 *
  24 * GPL HEADER END
  25 */
  26/*
  27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  28 * Use is subject to license terms.
  29 *
  30 * Copyright (c) 2011, 2012, Intel Corporation.
  31 */
  32/*
  33 * This file is part of Lustre, http://www.lustre.org/
  34 * Lustre is a trademark of Sun Microsystems, Inc.
  35 *
  36 * lustre/llite/llite_close.c
  37 *
  38 * Lustre Lite routines to issue a secondary close after writeback
  39 */
  40
  41#include <linux/module.h>
  42
  43#define DEBUG_SUBSYSTEM S_LLITE
  44
  45#include <lustre_lite.h>
  46#include "llite_internal.h"
  47
  48/** records that a write is in flight */
  49void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
  50{
  51        struct ll_inode_info *lli = ll_i2info(club->cob_inode);
  52
  53        ENTRY;
  54        spin_lock(&lli->lli_lock);
  55        lli->lli_flags |= LLIF_SOM_DIRTY;
  56        if (page != NULL && list_empty(&page->cpg_pending_linkage))
  57                list_add(&page->cpg_pending_linkage,
  58                             &club->cob_pending_list);
  59        spin_unlock(&lli->lli_lock);
  60        EXIT;
  61}
  62
  63/** records that a write has completed */
  64void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
  65{
  66        struct ll_inode_info *lli = ll_i2info(club->cob_inode);
  67        int rc = 0;
  68
  69        ENTRY;
  70        spin_lock(&lli->lli_lock);
  71        if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
  72                list_del_init(&page->cpg_pending_linkage);
  73                rc = 1;
  74        }
  75        spin_unlock(&lli->lli_lock);
  76        if (rc)
  77                ll_queue_done_writing(club->cob_inode, 0);
  78        EXIT;
  79}
  80
/** Queues DONE_WRITING if
 * - done writing is allowed;
 * - inode has no dirty pages; */
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
	ENTRY;

	spin_lock(&lli->lli_lock);
	lli->lli_flags |= flags;

	/* Queue only when DONE_WRITING is permitted and no writes remain
	 * in flight on this inode. */
	if ((lli->lli_flags & LLIF_DONE_WRITING) &&
	    list_empty(&club->cob_pending_list)) {
		struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;

		/* NOTE(review): MDS_SIZE_LOCK held here looks unexpected
		 * right after recovery; warn but proceed. */
		if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
			CWARN("ino %lu/%u(flags %u) som valid it just after "
			      "recovery\n",
			      inode->i_ino, inode->i_generation,
			      lli->lli_flags);
		/* DONE_WRITING is allowed and inode has no dirty page. */
		spin_lock(&lcq->lcq_lock);

		LASSERT(list_empty(&lli->lli_close_list));
		CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
		       inode->i_ino, inode->i_generation);
		list_add_tail(&lli->lli_close_list, &lcq->lcq_head);

		/* Avoid a concurrent insertion into the close thread queue:
		 * an inode is already in the close thread, open(), write(),
		 * close() happen, epoch is closed as the inode is marked as
		 * LLIF_EPOCH_PENDING. When pages are written inode should not
		 * be inserted into the queue again, clear this flag to avoid
		 * it. */
		lli->lli_flags &= ~LLIF_DONE_WRITING;

		/* Wake the close thread while still holding lcq_lock so the
		 * insertion and wakeup are atomic w.r.t. the consumer. */
		wake_up(&lcq->lcq_waitq);
		spin_unlock(&lcq->lcq_lock);
	}
	spin_unlock(&lli->lli_lock);
	EXIT;
}
 124
/** Pack SOM attributes into @op_data for CLOSE, DONE_WRITING rpc. */
void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	ENTRY;

	op_data->op_flags |= MF_SOM_CHANGE;
	/* Check if Size-on-MDS attributes are valid. */
	if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
		CERROR("ino %lu/%u(flags %u) som valid it just after "
		       "recovery\n", inode->i_ino, inode->i_generation,
		       lli->lli_flags);

	if (!cl_local_size(inode)) {
		/* Send Size-on-MDS Attributes if valid. */
		op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
				ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
	}
	EXIT;
}
 145
/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data.
 *
 * On the deferred paths (*och handed off, or DONE_WRITING postponed) an
 * extra inode reference is taken with igrab(); the close thread releases
 * it later with iput(). */
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
		      struct obd_client_handle **och, unsigned long flags)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
	ENTRY;

	spin_lock(&lli->lli_lock);
	if (!(list_empty(&club->cob_pending_list))) {
		if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
			LASSERT(*och != NULL);
			LASSERT(lli->lli_pending_och == NULL);
			/* Inode is dirty and there is no pending write done
			 * request yet, DONE_WRITE is to be sent later. */
			lli->lli_flags |= LLIF_EPOCH_PENDING;
			lli->lli_pending_och = *och;
			spin_unlock(&lli->lli_lock);

			inode = igrab(inode);
			LASSERT(inode);
			GOTO(out, 0);
		}
		if (flags & LLIF_DONE_WRITING) {
			/* Some pages are still dirty, it is early to send
			 * DONE_WRITE. Wait until all pages will be flushed
			 * and try DONE_WRITE again later. */
			LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
			lli->lli_flags |= LLIF_DONE_WRITING;
			spin_unlock(&lli->lli_lock);

			inode = igrab(inode);
			LASSERT(inode);
			GOTO(out, 0);
		}
	}
	CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID"\n",
	       ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
	op_data->op_flags |= MF_EPOCH_CLOSE;

	if (flags & LLIF_DONE_WRITING) {
		LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
		LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
		/* Take ownership of the handle deferred by an earlier call. */
		*och = lli->lli_pending_och;
		lli->lli_pending_och = NULL;
		lli->lli_flags &= ~LLIF_EPOCH_PENDING;
	} else {
		/* Pack Size-on-MDS inode attributes only if they have
		 * changed */
		if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
			spin_unlock(&lli->lli_lock);
			GOTO(out, 0);
		}

		/* There is a pending DONE_WRITE -- close epoch with no
		 * attribute change. */
		if (lli->lli_flags & LLIF_EPOCH_PENDING) {
			spin_unlock(&lli->lli_lock);
			GOTO(out, 0);
		}
	}

	LASSERT(list_empty(&club->cob_pending_list));
	lli->lli_flags &= ~LLIF_SOM_DIRTY;
	spin_unlock(&lli->lli_lock);
	/* Pack the SOM attributes outside lli_lock. */
	ll_done_writing_attr(inode, op_data);

	EXIT;
out:
	return;
}
 216
/**
 * Client updates SOM attributes on MDS (including llog cookies):
 * obd_getattr with no lock and md_setattr.
 */
int ll_som_update(struct inode *inode, struct md_op_data *op_data)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ptlrpc_request *request = NULL;
	__u32 old_flags;
	struct obdo *oa;
	int rc;
	ENTRY;

	LASSERT(op_data != NULL);
	if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
		CERROR("ino %lu/%u(flags %u) som valid it just after "
		       "recovery\n", inode->i_ino, inode->i_generation,
		       lli->lli_flags);

	OBDO_ALLOC(oa);
	if (!oa) {
		CERROR("can't allocate memory for Size-on-MDS update.\n");
		RETURN(-ENOMEM);
	}

	old_flags = op_data->op_flags;
	op_data->op_flags = MF_SOM_CHANGE;

	/* If inode is already in another epoch, skip getattr from OSTs. */
	if (lli->lli_ioepoch == op_data->op_ioepoch) {
		rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
				      old_flags & MF_GETATTR_LOCK);
		if (rc) {
			/* Getattr failed: send the setattr with no valid
			 * OST attributes rather than aborting. */
			oa->o_valid = 0;
			if (rc != -ENOENT)
				CERROR("inode_getattr failed (%d): unable to "
				       "send a Size-on-MDS attribute update "
				       "for inode %lu/%u\n", rc, inode->i_ino,
				       inode->i_generation);
		} else {
			CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
			       PFID(&lli->lli_fid));
		}
		/* Install attributes into op_data. */
		md_from_obdo(op_data, oa, oa->o_valid);
	}

	/* Push the (possibly empty) attribute update to the MDS. */
	rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
			NULL, 0, NULL, 0, &request, NULL);
	ptlrpc_req_finished(request);

	OBDO_FREE(oa);
	RETURN(rc);
}
 271
 272/**
 273 * Closes the ioepoch and packs all the attributes into @op_data for
 274 * DONE_WRITING rpc.
 275 */
 276static void ll_prepare_done_writing(struct inode *inode,
 277                                    struct md_op_data *op_data,
 278                                    struct obd_client_handle **och)
 279{
 280        ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
 281        /* If there is no @och, we do not do D_W yet. */
 282        if (*och == NULL)
 283                return;
 284
 285        ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
 286        ll_prep_md_op_data(op_data, inode, NULL, NULL,
 287                           0, 0, LUSTRE_OPC_ANY, NULL);
 288}
 289
/** Send a DONE_WRITING rpc.
 *
 * Requires the MDS connection to support SOM. Falls back to a full
 * Size-on-MDS update (ll_som_update) when the MDS answers -EAGAIN. */
static void ll_done_writing(struct inode *inode)
{
	struct obd_client_handle *och = NULL;
	struct md_op_data *op_data;
	int rc;
	ENTRY;

	LASSERT(exp_connect_som(ll_i2mdexp(inode)));

	OBD_ALLOC_PTR(op_data);
	if (op_data == NULL) {
		CERROR("can't allocate op_data\n");
		EXIT;
		return;
	}

	ll_prepare_done_writing(inode, op_data, &och);
	/* If there is no @och, we do not do D_W yet. */
	if (och == NULL)
		GOTO(out, 0);

	rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
	if (rc == -EAGAIN) {
		/* MDS has instructed us to obtain Size-on-MDS attribute from
		 * OSTs and send setattr to back to MDS. */
		rc = ll_som_update(inode, op_data);
	} else if (rc) {
		CERROR("inode %lu mdc done_writing failed: rc = %d\n",
		       inode->i_ino, rc);
	}
out:
	/* op_data is always released; och only if it was handed to us. */
	ll_finish_md_op_data(op_data);
	if (och) {
		md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
		OBD_FREE_PTR(och);
	}
	EXIT;
}
 329
 330static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
 331{
 332        struct ll_inode_info *lli = NULL;
 333
 334        spin_lock(&lcq->lcq_lock);
 335
 336        if (!list_empty(&lcq->lcq_head)) {
 337                lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
 338                                     lli_close_list);
 339                list_del_init(&lli->lli_close_list);
 340        } else if (atomic_read(&lcq->lcq_stop))
 341                lli = ERR_PTR(-EALREADY);
 342
 343        spin_unlock(&lcq->lcq_lock);
 344        return lli;
 345}
 346
 347static int ll_close_thread(void *arg)
 348{
 349        struct ll_close_queue *lcq = arg;
 350        ENTRY;
 351
 352        complete(&lcq->lcq_comp);
 353
 354        while (1) {
 355                struct l_wait_info lwi = { 0 };
 356                struct ll_inode_info *lli;
 357                struct inode *inode;
 358
 359                l_wait_event_exclusive(lcq->lcq_waitq,
 360                                       (lli = ll_close_next_lli(lcq)) != NULL,
 361                                       &lwi);
 362                if (IS_ERR(lli))
 363                        break;
 364
 365                inode = ll_info2i(lli);
 366                CDEBUG(D_INFO, "done_writting for inode %lu/%u\n",
 367                       inode->i_ino, inode->i_generation);
 368                ll_done_writing(inode);
 369                iput(inode);
 370        }
 371
 372        CDEBUG(D_INFO, "ll_close exiting\n");
 373        complete(&lcq->lcq_comp);
 374        RETURN(0);
 375}
 376
 377int ll_close_thread_start(struct ll_close_queue **lcq_ret)
 378{
 379        struct ll_close_queue *lcq;
 380        task_t *task;
 381
 382        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
 383                return -EINTR;
 384
 385        OBD_ALLOC(lcq, sizeof(*lcq));
 386        if (lcq == NULL)
 387                return -ENOMEM;
 388
 389        spin_lock_init(&lcq->lcq_lock);
 390        INIT_LIST_HEAD(&lcq->lcq_head);
 391        init_waitqueue_head(&lcq->lcq_waitq);
 392        init_completion(&lcq->lcq_comp);
 393
 394        task = kthread_run(ll_close_thread, lcq, "ll_close");
 395        if (IS_ERR(task)) {
 396                OBD_FREE(lcq, sizeof(*lcq));
 397                return PTR_ERR(task);
 398        }
 399
 400        wait_for_completion(&lcq->lcq_comp);
 401        *lcq_ret = lcq;
 402        return 0;
 403}
 404
/* Stop the close thread and free the queue.
 *
 * The completion is re-armed before setting lcq_stop so the thread's
 * exit-time complete() is the one we wait for; the queue must not be
 * freed until the thread has stopped touching it. */
void ll_close_thread_shutdown(struct ll_close_queue *lcq)
{
	init_completion(&lcq->lcq_comp);
	atomic_inc(&lcq->lcq_stop);
	wake_up(&lcq->lcq_waitq);
	wait_for_completion(&lcq->lcq_comp);
	OBD_FREE(lcq, sizeof(*lcq));
}
 413