linux/block/bounce.c
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE       64
#define ISA_POOL_SIZE   16

static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
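/*
 * Set up the emergency page pool used for bouncing.  When highmem is
 * configured but the machine has none (and memory hotplug cannot add any
 * later), the pool is not needed and setup is skipped.
 */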
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
        if (max_pfn <= max_low_pfn)
                return 0;
#endif

        page_pool = mempool_create_page_pool(POOL_SIZE, 0);
        BUG_ON(!page_pool);
        pr_info("pool size: %d pages\n", POOL_SIZE);

        return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version: map the destination vec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
        unsigned long flags;
        unsigned char *vto;

        local_irq_save(flags);
        vto = kmap_atomic(to->bv_page);
        memcpy(vto + to->bv_offset, vfrom, to->bv_len);
        kunmap_atomic(vto);
        local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)      \
        memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
        return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * gets called "every" time someone initializes a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
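/*
 * Illustrative call path (not part of this file): drivers normally reach
 * this through blk_queue_bounce_limit() when they set a bounce limit that
 * requires GFP_DMA pages; see block/blk-settings.c.
 */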
int init_emergency_isa_pool(void)
{
        if (isa_page_pool)
                return 0;

        isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
                                       mempool_free_pages, (void *) 0);
        BUG_ON(!isa_page_pool);

        pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
        return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. Always
 * kmap it; it will do the Right Thing.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
        unsigned char *vfrom;
        struct bio_vec tovec, *fromvec = from->bi_io_vec;
        struct bvec_iter iter;

        bio_for_each_segment(tovec, to, iter) {
                if (tovec.bv_page != fromvec->bv_page) {
                        /*
                         * fromvec->bv_offset and fromvec->bv_len might have
                         * been modified by the block layer, so use the
                         * original copy; bounce_copy_vec already uses
                         * tovec->bv_len.
                         */
                        vfrom = page_address(fromvec->bv_page) +
                                tovec.bv_offset;

                        bounce_copy_vec(&tovec, vfrom);
                        flush_dcache_page(tovec.bv_page);
                }

                fromvec++;
        }
}

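/*
 * Common completion path for a bounced bio: free the bounce pages that were
 * substituted for the originals, propagate the completion status to the
 * original bio and end it, then drop the bounce clone itself.
 */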
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
        struct bio *bio_orig = bio->bi_private;
        struct bio_vec *bvec, *org_vec;
        int i;
        int start = bio_orig->bi_iter.bi_idx;

        /*
         * free up bounce indirect pages used
         */
        bio_for_each_segment_all(bvec, bio, i) {
                org_vec = bio_orig->bi_io_vec + i + start;

                if (bvec->bv_page == org_vec->bv_page)
                        continue;

                dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
                mempool_free(bvec->bv_page, pool);
        }

        bio_orig->bi_error = bio->bi_error;
        bio_endio(bio_orig);
        bio_put(bio);
}

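/*
 * WRITE completions: the data has already been written out of the bounce
 * pages, so only the common cleanup is needed.
 */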
static void bounce_end_io_write(struct bio *bio)
{
        bounce_end_io(bio, page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
        bounce_end_io(bio, isa_page_pool);
}

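/*
 * READ completions: the device filled the bounce pages, so copy the data
 * back into the original (possibly highmem) pages before the common
 * cleanup, but only if the read actually succeeded.
 */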
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
        struct bio *bio_orig = bio->bi_private;

        if (!bio->bi_error)
                copy_to_high_bio_irq(bio_orig, bio);

        bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
        __bounce_end_io_read(bio, page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
        __bounce_end_io_read(bio, isa_page_pool);
}

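/*
 * Walk the bio; if any segment sits above the queue's bounce pfn, clone the
 * bio and replace each such segment with a page from the bounce pool,
 * copying the payload over for writes.  The clone is submitted in place of
 * the original, which is only completed from the clone's end_io handler.
 */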
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
                               mempool_t *pool)
{
        struct bio *bio;
        int rw = bio_data_dir(*bio_orig);
        struct bio_vec *to, from;
        struct bvec_iter iter;
        unsigned i;

        bio_for_each_segment(from, *bio_orig, iter)
                if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
                        goto bounce;

        return;
bounce:
        bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

        bio_for_each_segment_all(to, bio, i) {
                struct page *page = to->bv_page;

                if (page_to_pfn(page) <= queue_bounce_pfn(q))
                        continue;

                to->bv_page = mempool_alloc(pool, q->bounce_gfp);
                inc_zone_page_state(to->bv_page, NR_BOUNCE);

                if (rw == WRITE) {
                        char *vto, *vfrom;

                        flush_dcache_page(page);

                        vto = page_address(to->bv_page) + to->bv_offset;
                        vfrom = kmap_atomic(page) + to->bv_offset;
                        memcpy(vto, vfrom, to->bv_len);
                        kunmap_atomic(vfrom);
                }
        }

        trace_block_bio_bounce(q, *bio_orig);

        bio->bi_flags |= (1 << BIO_BOUNCED);

        if (pool == page_pool) {
                bio->bi_end_io = bounce_end_io_write;
                if (rw == READ)
                        bio->bi_end_io = bounce_end_io_read;
        } else {
                bio->bi_end_io = bounce_end_io_write_isa;
                if (rw == READ)
                        bio->bi_end_io = bounce_end_io_read_isa;
        }

        bio->bi_private = *bio_orig;
        *bio_orig = bio;
}

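/*
 * Main entry point: bounce any pages of *bio_orig that sit above the
 * queue's bounce limit.  Typically reached from the bio submission path
 * (e.g. blk_queue_bio() in block/blk-core.c) before the bio hits the
 * driver; the call site named here is illustrative, not part of this file.
 */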
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
        mempool_t *pool;

        /*
         * Data-less bio, nothing to bounce
         */
        if (!bio_has_data(*bio_orig))
                return;

        /*
         * for non-isa bounce case, just check if the bounce pfn is equal
         * to or bigger than the highest pfn in the system -- in that case,
         * don't waste time iterating over bio segments
         */
        if (!(q->bounce_gfp & GFP_DMA)) {
                if (queue_bounce_pfn(q) >= blk_max_pfn)
                        return;
                pool = page_pool;
        } else {
                BUG_ON(!isa_page_pool);
                pool = isa_page_pool;
        }

        /*
         * slow path
         */
        __blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);