linux/net/rds/page.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/export.h>

#include "rds.h"

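/*
 * Per-cpu cache of the tail of the most recently allocated page:
 * r_page is the cached page (or NULL) and r_offset is the first unused
 * byte within it.  rds_page_remainder_alloc() carves small fragments
 * out of this region before falling back to a fresh allocation.
 */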
struct rds_page_remainder {
        struct page     *r_page;
        unsigned long   r_offset;
};

static
DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);

/*
 * Returns 0 on success or -EFAULT if the copy faulted.
 *
 * We don't have to worry about flush_dcache_page() as this only works
 * with private pages.  If, say, we were to do directed receive to pinned
 * user pages we'd have to worry more about cache coherence.  (Though
 * the flush_dcache_page() in get_user_pages() would probably be enough.)
 */
int rds_page_copy_user(struct page *page, unsigned long offset,
                       void __user *ptr, unsigned long bytes,
                       int to_user)
{
        unsigned long ret;
        void *addr;

        addr = kmap(page);
        if (to_user) {
                rds_stats_add(s_copy_to_user, bytes);
                ret = copy_to_user(ptr, addr + offset, bytes);
        } else {
                rds_stats_add(s_copy_from_user, bytes);
                ret = copy_from_user(addr + offset, ptr, bytes);
        }
        kunmap(page);

        /* copy_{to,from}_user() return the number of bytes left uncopied */
        return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);
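
/*
 * Illustrative sketch, not part of the original file: one way a receive
 * path could hand an incoming page fragment to userspace with
 * rds_page_copy_user().  "example_frag" and its fields are hypothetical.
 */
#if 0
struct example_frag {
        struct page     *f_page;
        unsigned long   f_offset;
        unsigned long   f_len;
};

static int example_frag_to_user(struct example_frag *frag, void __user *uptr)
{
        /* to_user == 1 selects copy_to_user() and the s_copy_to_user stat */
        return rds_page_copy_user(frag->f_page, frag->f_offset,
                                  uptr, frag->f_len, 1);
}
#endif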

/**
 * rds_page_remainder_alloc - build up regions of a message.
 *
 * @scat: Scatter list for message
 * @bytes: the number of bytes needed.
 * @gfp: the waiting behaviour of the allocation
 *
 * @gfp is always ORed with __GFP_HIGHMEM.  Callers must be prepared to
 * kmap the pages, etc.
 *
 * If @bytes is at least a full page then this just returns a page from
 * alloc_page().
 *
 * If @bytes is a partial page then this stores the unused region of the
 * page in a per-cpu structure.  Future partial-page allocations may be
 * satisfied from that cached region.  This lets us waste less memory on
 * small allocations with minimal complexity.  It works because the transmit
 * path passes read-only page regions down to devices.  They hold a page
 * reference until they are done with the region.
 *
 * Returns 0 on success or -ENOMEM if a page could not be allocated.
 */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
                             gfp_t gfp)
{
        struct rds_page_remainder *rem;
        unsigned long flags;
        struct page *page;
        int ret;

        gfp |= __GFP_HIGHMEM;

        /* jump straight to allocation if we're trying for a whole page */
        if (bytes >= PAGE_SIZE) {
                page = alloc_page(gfp);
                if (!page) {
                        ret = -ENOMEM;
                } else {
                        sg_set_page(scat, page, PAGE_SIZE, 0);
                        ret = 0;
                }
                goto out;
        }

        rem = &per_cpu(rds_page_remainders, get_cpu());
        local_irq_save(flags);

        while (1) {
                /* avoid a tiny region getting stuck by tossing it */
                if (rem->r_page && bytes > (PAGE_SIZE - rem->r_offset)) {
                        rds_stats_inc(s_page_remainder_miss);
                        __free_page(rem->r_page);
                        rem->r_page = NULL;
                }

                /* hand out a fragment from the cached page */
                if (rem->r_page && bytes <= (PAGE_SIZE - rem->r_offset)) {
                        sg_set_page(scat, rem->r_page, bytes, rem->r_offset);
                        get_page(sg_page(scat));

                        if (rem->r_offset != 0)
                                rds_stats_inc(s_page_remainder_hit);

                        /* start the next fragment on an 8-byte boundary */
                        rem->r_offset += ALIGN(bytes, 8);
                        if (rem->r_offset >= PAGE_SIZE) {
                                __free_page(rem->r_page);
                                rem->r_page = NULL;
                        }
                        ret = 0;
                        break;
                }

                /* alloc if there is nothing for us to use */
                local_irq_restore(flags);
                put_cpu();

                page = alloc_page(gfp);

                rem = &per_cpu(rds_page_remainders, get_cpu());
                local_irq_save(flags);

                if (!page) {
                        ret = -ENOMEM;
                        break;
                }

                /* did someone race to fill the remainder before us? */
                if (rem->r_page) {
                        __free_page(page);
                        continue;
                }

                /* otherwise install our page and loop around to hand out
                 * a fragment from it */
                rem->r_page = page;
                rem->r_offset = 0;
        }

        local_irq_restore(flags);
        put_cpu();
out:
        rdsdebug("bytes %lu ret %d %p %u %u\n", bytes, ret,
                 ret ? NULL : sg_page(scat), ret ? 0 : scat->offset,
                 ret ? 0 : scat->length);
        return ret;
}
EXPORT_SYMBOL_GPL(rds_page_remainder_alloc);
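
/*
 * Illustrative sketch, not part of the original file: filling a message's
 * scatterlist one fragment at a time with rds_page_remainder_alloc().
 * The helper name, fragment sizing and error handling are hypothetical;
 * a real caller must put_page() any entries already filled on failure.
 */
#if 0
static int example_fill_sg(struct scatterlist *sg, int nents,
                           unsigned long total, gfp_t gfp)
{
        unsigned long left = total;
        int i, ret;

        for (i = 0; i < nents && left; i++) {
                /* each call fills one entry with at most PAGE_SIZE bytes */
                ret = rds_page_remainder_alloc(&sg[i],
                                min_t(unsigned long, left, PAGE_SIZE),
                                gfp);
                if (ret)
                        return ret;
                left -= sg[i].length;
        }
        return left ? -EMSGSIZE : 0;
}
#endif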

/* Free any remainder page still cached on each CPU. */
void rds_page_exit(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct rds_page_remainder *rem;

                rem = &per_cpu(rds_page_remainders, cpu);
                rdsdebug("cpu %u\n", cpu);

                if (rem->r_page)
                        __free_page(rem->r_page);
                rem->r_page = NULL;
        }
}