/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */

/*
 * Objects of type struct drbd_request exist only on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 *  It will be created.
 *  It will be marked with the intention to be
 *    submitted to local disk and/or
 *    sent via the network.
 *
 *  It has to be placed on the transfer log and other housekeeping lists,
 *  in case we have a network connection.
 *
 *  It may be identified as a concurrent (write) request
 *    and be handled accordingly.
 *
 *  It may be handed over to the local disk subsystem.
 *  It may be completed by the local disk subsystem,
 *    either successfully or with io-error.
 *  In case it is a READ request, and it failed locally,
 *    it may be retried remotely.
 *
 *  It may be queued for sending.
 *  It may be handed over to the network stack,
 *    which may fail.
 *  It may be acknowledged by the "peer" according to the wire_protocol in use.
 *    This may be a negative ack.
 *  It may receive a faked ack when the network connection is lost and the
 *  transfer log is cleaned up.
 *  Sending may be canceled due to network connection loss.
 *  When it finally has outlived its time,
 *    corresponding dirty bits in the resync-bitmap may be cleared or set,
 *    it will be destroyed,
 *    and completion will be signalled to the originator,
 *      with or without "success".
 */

enum drbd_req_event {
        CREATED,
        TO_BE_SENT,
        TO_BE_SUBMITTED,

        /* XXX yes, now I am inconsistent...
         * these are not "events" but "actions"
         * oh, well... */
        QUEUE_FOR_NET_WRITE,
        QUEUE_FOR_NET_READ,
        QUEUE_FOR_SEND_OOS,

        /* An empty flush is queued as P_BARRIER,
         * which will cause it to complete "successfully",
         * even if the local disk flush failed.
         *
         * Just like "real" requests, empty flushes (blkdev_issue_flush()) will
         * only see an error if neither local nor remote data is reachable. */
        QUEUE_AS_DRBD_BARRIER,

        SEND_CANCELED,
        SEND_FAILED,
        HANDED_OVER_TO_NETWORK,
        OOS_HANDED_TO_NETWORK,
        CONNECTION_LOST_WHILE_PENDING,
        READ_RETRY_REMOTE_CANCELED,
        RECV_ACKED_BY_PEER,
        WRITE_ACKED_BY_PEER,
        WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
        CONFLICT_RESOLVED,
        POSTPONE_WRITE,
        NEG_ACKED,
        BARRIER_ACKED, /* in protocol A and B */
        DATA_RECEIVED, /* (remote read) */

        COMPLETED_OK,
        READ_COMPLETED_WITH_ERROR,
        READ_AHEAD_COMPLETED_WITH_ERROR,
        WRITE_COMPLETED_WITH_ERROR,
        DISCARD_COMPLETED_NOTSUPP,
        DISCARD_COMPLETED_WITH_ERROR,

        ABORT_DISK_IO,
        RESEND,
        FAIL_FROZEN_DISK_IO,
        RESTART_FROZEN_DISK_IO,
        NOTHING,
};
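
/* Illustrative only, not part of the original header: for a protocol C write
 * on a healthy connection, the event order is typically something like
 *
 *   TO_BE_SENT, TO_BE_SUBMITTED,
 *   QUEUE_FOR_NET_WRITE, HANDED_OVER_TO_NETWORK,
 *   COMPLETED_OK (local disk),
 *   WRITE_ACKED_BY_PEER,
 *   BARRIER_ACKED (once its epoch is closed),
 *
 * before the request is destroyed; error events (SEND_FAILED, NEG_ACKED,
 * WRITE_COMPLETED_WITH_ERROR, ...) can branch off at almost any step. */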

/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyway.
 */
enum drbd_req_state_bits {
        /* 3210
         * 0000: no local possible
         * 0001: to be submitted
         *    UNUSED, we could map: 0011: submitted, completion still pending
         * 0110: completed ok
         * 0010: completed with error
         * 1001: Aborted (before completion)
         * 1x10: Aborted and completed -> free
         */
        __RQ_LOCAL_PENDING,
        __RQ_LOCAL_COMPLETED,
        __RQ_LOCAL_OK,
        __RQ_LOCAL_ABORTED,

        /* 87654
         * 00000: no network possible
         * 00001: to be sent
         * 00011: to be sent, on worker queue
         * 00101: sent, expecting recv_ack (B) or write_ack (C)
         * 11101: sent,
         *        recv_ack (B) or implicit "ack" (A),
         *        still waiting for the barrier ack.
         *        master_bio may already be completed and invalidated.
         * 11100: write acked (C),
         *        data received (for remote read, any protocol)
         *        or finally the barrier ack has arrived (B,A)...
         *        request can be freed
         * 01100: neg-acked (write, protocol C)
         *        or neg-d-acked (read, any protocol)
         *        or killed from the transfer log
         *        during cleanup after connection loss
         *        request can be freed
         * 01000: canceled or send failed...
         *        request can be freed
         */

        /* if "SENT" is not set yet, this can still fail or be canceled.
         * if "SENT" is set already, we still wait for an Ack packet.
         * when cleared, the master_bio may be completed.
         * in (B,A) the request object may still linger on the transfer log
         * until the corresponding barrier ack comes in */
        __RQ_NET_PENDING,

        /* If it is QUEUED, and it is a WRITE, it is also registered in the
         * transfer log. Currently we need this flag to avoid conflicts between
         * the worker canceling the request and tl_clear_barrier killing it
         * from the transfer log.  We should restructure the code so this
         * conflict no longer occurs. */
        __RQ_NET_QUEUED,

        /* well, actually only "handed over to the network stack".
         *
         * TODO can potentially be dropped because of the similar meaning
         * of RQ_NET_SENT and ~RQ_NET_QUEUED.
         * However, it is not exactly the same. Before we drop it,
         * we must ensure that we can tell a request with a network part
         * from a request without one, regardless of what happens to it. */
        __RQ_NET_SENT,

        /* when set, the request may be freed (if RQ_NET_QUEUED is clear).
         * Basically this means the corresponding P_BARRIER_ACK was received */
        __RQ_NET_DONE,

        /* whether or not we know (C) or pretend (B,A) that the write
         * was successfully written on the peer.
         */
        __RQ_NET_OK,

        /* peer called drbd_set_in_sync() for this write */
        __RQ_NET_SIS,

        /* keep this last, it's for the RQ_NET_MASK */
        __RQ_NET_MAX,

        /* Set when this is a write, clear for a read */
        __RQ_WRITE,
        __RQ_WSAME,
        __RQ_UNMAP,

        /* Should call drbd_al_complete_io() for this request... */
        __RQ_IN_ACT_LOG,

        /* This was the most recent request during some blk_finish_plug()
         * or its implicit from-schedule equivalent.
         * We may use it as a hint to send a P_UNPLUG_REMOTE */
        __RQ_UNPLUG,

        /* The peer has sent a retry ACK */
        __RQ_POSTPONED,

        /* would have been completed,
         * but was not, because of drbd_suspended() */
        __RQ_COMPLETION_SUSP,

        /* We expect a receive ACK (wire proto B) */
        __RQ_EXP_RECEIVE_ACK,

        /* We expect a write ACK (wire proto C) */
        __RQ_EXP_WRITE_ACK,

        /* waiting for a barrier ack, did an extra kref_get */
        __RQ_EXP_BARR_ACK,
};

#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)
#define RQ_LOCAL_ABORTED   (1UL << __RQ_LOCAL_ABORTED)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_ABORTED << 1)-1)

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)
#define RQ_WSAME           (1UL << __RQ_WSAME)
#define RQ_UNMAP           (1UL << __RQ_UNMAP)
#define RQ_IN_ACT_LOG      (1UL << __RQ_IN_ACT_LOG)
#define RQ_UNPLUG          (1UL << __RQ_UNPLUG)
#define RQ_POSTPONED       (1UL << __RQ_POSTPONED)
#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
#define RQ_EXP_WRITE_ACK   (1UL << __RQ_EXP_WRITE_ACK)
#define RQ_EXP_BARR_ACK    (1UL << __RQ_EXP_BARR_ACK)
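
/* Illustrative only, not part of the original header: the mask macros above
 * partition the request state word, so the local and network parts can be
 * inspected independently.  The helper names and the bare "state" parameter
 * are hypothetical; in-tree code inspects the state word under the request
 * lock. */
static inline bool drbd_req_example_has_net_part(unsigned long state)
{
        /* any bit set in the network part means this request
         * was (or is) involved with the peer */
        return (state & RQ_NET_MASK) != 0;
}

static inline bool drbd_req_example_local_ok(unsigned long state)
{
        /* per the bit table above, "completed ok" is 0110:
         * COMPLETED and OK set, PENDING and ABORTED clear */
        return (state & RQ_LOCAL_MASK) == (RQ_LOCAL_COMPLETED | RQ_LOCAL_OK);
}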

/* To wake up the frozen transfer log, mod_req() has to return whether the
   request should be counted in the epoch object */
#define MR_WRITE       1
#define MR_READ        2

static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
        struct bio *bio;
        bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);

        req->private_bio = bio;

        bio->bi_private  = req;
        bio->bi_end_io   = drbd_request_endio;
        bio->bi_next     = NULL;
}
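
/* Illustrative only, not part of the original header: a minimal sketch of how
 * the private bio might be wired up and handed to the backing device.  The
 * helper name is hypothetical, and ldev reference counting (get_ldev() /
 * put_ldev()) is omitted for brevity. */
static inline void drbd_req_example_submit_local(struct drbd_device *device,
                                                 struct drbd_request *req,
                                                 struct bio *bio_src)
{
        /* clone the master bio; completion goes through drbd_request_endio() */
        drbd_req_make_private_bio(req, bio_src);

        /* redirect the clone at the local backing device and submit it */
        bio_set_dev(req->private_bio, device->ldev->backing_bdev);
        submit_bio(req->private_bio);
}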

/* Short-lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
        struct bio *bio;
        int error;
};

extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
                struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                struct bio_and_error *m);
extern void complete_master_bio(struct drbd_device *device,
                struct bio_and_error *m);
extern void request_timer_fn(struct timer_list *t);
extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void tl_abort_disk_io(struct drbd_device *device);

/* this is in drbd_main.c */
extern void drbd_restart_request(struct drbd_request *req);

/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
        struct drbd_device *device = req->device;
        struct bio_and_error m;
        int rv;

        /* __req_mod possibly frees req, do not touch req after that! */
        rv = __req_mod(req, what, &m);
        if (m.bio)
                complete_master_bio(device, &m);

        return rv;
}

/* completion of the master bio is outside of our spinlock.
 * We still may or may not be inside some irqs-disabled section
 * of the lower-level driver completion callback, so we need to
 * spin_lock_irqsave here. */
static inline int req_mod(struct drbd_request *req,
                enum drbd_req_event what)
{
        unsigned long flags;
        struct drbd_device *device = req->device;
        struct bio_and_error m;
        int rv;

        spin_lock_irqsave(&device->resource->req_lock, flags);
        rv = __req_mod(req, what, &m);
        spin_unlock_irqrestore(&device->resource->req_lock, flags);

        if (m.bio)
                complete_master_bio(device, &m);

        return rv;
}
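
/* Illustrative only, not part of the original header: the typical division of
 * labor between the two wrappers above.  req_mod() takes the request lock
 * itself, so it suits callers that hold no locks (e.g. completion paths),
 * while _req_mod() is for callers already under the request lock.  The helper
 * name and the choice of events here are hypothetical. */
static inline void drbd_req_example_local_completion(struct drbd_request *req,
                                                     int error)
{
        /* map the outcome onto one of the completion events above */
        enum drbd_req_event what = error ? WRITE_COMPLETED_WITH_ERROR
                                         : COMPLETED_OK;

        /* not holding the request lock here, so use the locking wrapper */
        req_mod(req, what);
}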

extern bool drbd_should_do_remote(union drbd_dev_state);

#endif /* _DRBD_REQ_H */