/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/xdr.h>
#include <linux/export.h>


/**
 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Possibly called several times to iterate over an sk_buff and copy
 * data out of it.
 */
size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
		return 0;
	desc->count -= len;
	desc->offset += len;
	return len;
}
EXPORT_SYMBOL_GPL(xdr_skb_read_bits);
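
/*
 * Illustrative sketch (not part of the original file): pulling the first
 * @len bytes of an skb into a flat buffer with xdr_skb_read_bits().  The
 * function and its arguments are hypothetical; real users pass
 * xdr_skb_read_bits() as the copy_actor of xdr_partial_copy_from_skb()
 * below.
 */
static __maybe_unused size_t example_read_header(struct sk_buff *skb,
						 void *buf, size_t len)
{
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= 0,
		.count	= skb->len,
	};

	/* returns the number of bytes copied, or 0 if the copy failed */
	return xdr_skb_read_bits(&desc, buf, len);
}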

/**
 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Same as xdr_skb_read_bits, but calculate a checksum at the same time.
 */
static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	unsigned int pos;
	__wsum csum2;

	if (len > desc->count)
		len = desc->count;
	pos = desc->offset;
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}

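/*
 * Illustrative sketch (not part of the original file): how two partial
 * checksums over adjacent buffers are folded together with
 * csum_block_add(), which is what xdr_skb_read_and_csum_bits() relies on
 * when a copy is split across the head, pages and tail of an xdr_buf.
 * The function and buffer names are hypothetical.
 */
static __maybe_unused __wsum example_csum_two_parts(const void *part1, int len1,
						    const void *part2, int len2)
{
	__wsum csum = csum_partial(part1, len1, 0);

	/* part2 starts at byte offset len1 within the logical stream */
	return csum_block_add(csum, csum_partial(part2, len2, 0), len1);
}
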
/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 * Returns the number of bytes copied, or -ENOMEM if nothing could be
 * copied because a receive page allocation failed.
 */
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	ssize_t		copied = 0;
	size_t		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_SHIFT;
		base &= ~PAGE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb);
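
/*
 * Illustrative sketch (not part of the original file): copying a whole skb
 * into an xdr_buf without checksumming, using xdr_skb_read_bits() as the
 * copy actor.  It mirrors the no_checksum path of csum_partial_copy_to_xdr()
 * below; the function name is hypothetical.
 */
static __maybe_unused int example_copy_skb_to_xdr(struct xdr_buf *xdr,
						  struct sk_buff *skb)
{
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= 0,
		.count	= skb->len,
	};

	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	/* a non-zero residual count means the xdr_buf was too small */
	return desc.count ? -1 : 0;
}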

/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec.  -DaveM
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader	desc;

	desc.skb = skb;
	desc.offset = 0;
	desc.count = skb->len - desc.offset;

	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		__wsum csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if (desc.count)
		return -1;
	if (csum_fold(desc.csum))
		return -1;
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
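
/*
 * Illustrative sketch (not part of the original file): how a UDP receive
 * path might hand a datagram to csum_partial_copy_to_xdr().  The reply
 * buffer argument and the function itself are hypothetical; the real
 * callers are the RPC client and server datagram receive routines.
 */
static __maybe_unused void example_udp_data_ready(struct xdr_buf *reply,
						  struct sk_buff *skb)
{
	/*
	 * Copy the datagram into @reply, verifying the UDP checksum on the
	 * fly.  A non-zero return means a short buffer or a checksum
	 * mismatch, and the datagram should be dropped.
	 */
	if (csum_partial_copy_to_xdr(reply, skb))
		return;

	/* @reply now holds skb->len bytes of verified RPC data */
}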