/* linux/drivers/md/dm-bio-prison.h */
/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BIO_PRISON_H
#define DM_BIO_PRISON_H

#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */

#include <linux/bio.h>
#include <linux/rbtree.h>

/*----------------------------------------------------------------*/

/*
 * Sometimes we can't deal with a bio straight away.  We put them in prison
 * where they can't cause any mischief.  Bios are put in a cell identified
 * by a key; multiple bios can be in the same cell.  When the cell is
 * subsequently unlocked the bios become available.
 */
struct dm_bio_prison;

  26/*
  27 * Keys define a range of blocks within either a virtual or physical
  28 * device.
  29 */
  30struct dm_cell_key {
  31        int virtual;
  32        dm_thin_id dev;
  33        dm_block_t block_begin, block_end;
  34};
  35
  36/*
  37 * Treat this as opaque, only in header so callers can manage allocation
  38 * themselves.
  39 */
  40struct dm_bio_prison_cell {
  41        struct list_head user_list;     /* for client use */
  42        struct rb_node node;
  43
  44        struct dm_cell_key key;
  45        struct bio *holder;
  46        struct bio_list bios;
  47};
  48
  49struct dm_bio_prison *dm_bio_prison_create(void);
  50void dm_bio_prison_destroy(struct dm_bio_prison *prison);
  51
  52/*
  53 * These two functions just wrap a mempool.  This is a transitory step:
  54 * Eventually all bio prison clients should manage their own cell memory.
  55 *
  56 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
  57 * in interrupt context or passed GFP_NOWAIT.
  58 */
  59struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
  60                                                    gfp_t gfp);
  61void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
  62                             struct dm_bio_prison_cell *cell);
  63
  64/*
  65 * Creates, or retrieves a cell that overlaps the given key.
  66 *
  67 * Returns 1 if pre-existing cell returned, zero if new cell created using
  68 * @cell_prealloc.
  69 */
  70int dm_get_cell(struct dm_bio_prison *prison,
  71                struct dm_cell_key *key,
  72                struct dm_bio_prison_cell *cell_prealloc,
  73                struct dm_bio_prison_cell **cell_result);
  74
  75/*
  76 * An atomic op that combines retrieving or creating a cell, and adding a
  77 * bio to it.
  78 *
  79 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  80 */
  81int dm_bio_detain(struct dm_bio_prison *prison,
  82                  struct dm_cell_key *key,
  83                  struct bio *inmate,
  84                  struct dm_bio_prison_cell *cell_prealloc,
  85                  struct dm_bio_prison_cell **cell_result);
  86
  87void dm_cell_release(struct dm_bio_prison *prison,
  88                     struct dm_bio_prison_cell *cell,
  89                     struct bio_list *bios);
  90void dm_cell_release_no_holder(struct dm_bio_prison *prison,
  91                               struct dm_bio_prison_cell *cell,
  92                               struct bio_list *inmates);
  93void dm_cell_error(struct dm_bio_prison *prison,
  94                   struct dm_bio_prison_cell *cell, int error);
  95
  96/*
  97 * Visits the cell and then releases.  Guarantees no new inmates are
  98 * inserted between the visit and release.
  99 */
 100void dm_cell_visit_release(struct dm_bio_prison *prison,
 101                           void (*visit_fn)(void *, struct dm_bio_prison_cell *),
 102                           void *context, struct dm_bio_prison_cell *cell);

/*----------------------------------------------------------------*/

/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct dm_deferred_set;
struct dm_deferred_entry;

 116struct dm_deferred_set *dm_deferred_set_create(void);
 117void dm_deferred_set_destroy(struct dm_deferred_set *ds);
 118
 119struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
 120void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
 121int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);

/*----------------------------------------------------------------*/

#endif	/* DM_BIO_PRISON_H */