linux/arch/m68k/include/asm/checksum.h
#ifndef _M68K_CHECKSUM_H
#define _M68K_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
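
/*
 * Example (an illustrative sketch; the fragment buffers and lengths
 * are hypothetical): the running sum is fed back into successive
 * calls and folded at the end.  Only the final fragment may have an
 * odd length.
 *
 *	__wsum sum;
 *	__sum16 csum;
 *
 *	sum = csum_partial(frag0, frag0_len, 0);
 *	sum = csum_partial(frag1, frag1_len, sum);
 *	csum = csum_fold(sum);
 */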

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or, better still, a 64-bit) boundary
 */

extern __wsum csum_partial_copy_from_user(const void __user *src,
					  void *dst,
					  int len, __wsum sum,
					  int *csum_err);

extern __wsum csum_partial_copy_nocheck(const void *src,
					void *dst, int len,
					__wsum sum);
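
/*
 * Example (an illustrative sketch; the buffer names are hypothetical):
 * copying a buffer in from user space while checksumming it in the
 * same pass, then checking whether the copy faulted.
 *
 *	int err = 0;
 *	__wsum sum;
 *
 *	sum = csum_partial_copy_from_user(usrc, kdst, len, 0, &err);
 *	if (err)
 *		return -EFAULT;
 */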

#ifdef CONFIG_COLDFIRE

/*
 *	The ColdFire cores don't support all the 68k instructions used
 *	in the optimized checksum code below, so the ColdFire build
 *	falls back to more standard C coded checksums.  The C version
 *	of ip_fast_csum() is significantly larger than the optimized
 *	assembly version, so it is not inlined here.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);

static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;

	tmp = (tmp & 0xffff) + (tmp >> 16);
	tmp = (tmp & 0xffff) + (tmp >> 16);

	return (__force __sum16)~tmp;
}
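
/*
 * Worked example, assuming sum = 0x1a2b3c4d:
 *
 *	0x3c4d + 0x1a2b = 0x5678	(add the two 16-bit halves)
 *	0x5678 + 0x0000 = 0x5678	(second pass absorbs any carry)
 *	~0x5678 = 0xa987		(one's complement result)
 *
 * The second pass matters when the first addition overflows 16 bits:
 * 0xffff + 0x0002 = 0x10001, which refolds to 0x0002.
 */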

#else

/*
 *	This is a version of ip_fast_csum() optimized for IP headers,
 *	which are always checksummed on 4-octet boundaries.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum = 0;
	unsigned long tmp;

	__asm__ ("subqw #1,%2\n"
		 "1:\t"
		 "movel %1@+,%3\n\t"
		 "addxl %3,%0\n\t"
		 "dbra  %2,1b\n\t"
		 "movel %0,%3\n\t"
		 "swap  %3\n\t"
		 "addxw %3,%0\n\t"
		 "clrw  %3\n\t"
		 "addxw %3,%0\n\t"
		 : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
		 : "0" (sum), "1" (iph), "2" (ihl)
		 : "memory");
	return (__force __sum16)~sum;
}
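
/*
 * Roughly equivalent C (an illustrative sketch only; the assembly
 * above keeps the carries in the X flag via addxl): sum ihl 32-bit
 * words with end-around carry, then fold to 16 bits and complement.
 *
 *	u64 acc = 0;
 *	const u32 *p = iph;
 *
 *	while (ihl--)
 *		acc += *p++;
 *	while (acc >> 32)
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);
 *	return csum_fold((__force __wsum)acc);
 */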

static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;

	__asm__("swap %1\n\t"
		"addw %1, %0\n\t"
		"clrw %1\n\t"
		"addxw %1, %0"
		: "=&d" (sum), "=&d" (tmp)
		: "0" (sum), "1" (tmp));

	return (__force __sum16)~sum;
}

#endif /* CONFIG_COLDFIRE */

/*
 * computes the checksum of the TCP/UDP pseudo-header without folding,
 * returning a 32-bit partial sum suitable for csum_fold()
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	__asm__ ("addl  %2,%0\n\t"
		 "addxl %3,%0\n\t"
		 "addxl %4,%0\n\t"
		 "clrl %1\n\t"
		 "addxl %1,%0"
		 : "=&d" (sum), "=d" (saddr)
		 : "g" (daddr), "1" (saddr), "d" (len + proto),
		   "0" (sum));
	return sum;
}
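
/*
 * Roughly equivalent C (an illustrative sketch; m68k is big-endian,
 * so len + proto is added unshifted): the addxl chain above folds
 * each carry back into the running sum.
 *
 *	u64 acc = (__force u32)sum;
 *
 *	acc += (__force u32)saddr;
 *	acc += (__force u32)daddr;
 *	acc += len + proto;
 *	while (acc >> 32)
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);
 *	return (__force __wsum)acc;
 */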

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
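
/*
 * Example (an illustrative sketch; the header pointer and address
 * variables are hypothetical): a typical UDP transmit path sums the
 * datagram with csum_partial() and then mixes in the pseudo-header.
 * A computed checksum of zero must be transmitted as all-ones, since
 * zero means "no checksum" in UDP.
 *
 *	__wsum csum = csum_partial(uh, ulen, 0);
 *
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen,
 *				      IPPROTO_UDP, csum);
 *	if (!uh->check)
 *		uh->check = CSUM_MANGLED_0;
 */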

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
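
/*
 * Example (an illustrative sketch; the pointer and length names are
 * hypothetical): verifying a received ICMP message.  Summing a packet
 * that carries a correct checksum yields 0xffff, so the folded,
 * complemented result is zero.
 *
 *	if (ip_compute_csum(icmph, icmp_len))
 *		goto csum_error;
 */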

#define _HAVE_ARCH_IPV6_CSUM
/*
 * computes the checksum of the TCP/UDP pseudo-header for IPv6
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
		__u32 len, unsigned short proto, __wsum sum)
{
	register unsigned long tmp;

	__asm__("addl %2@,%0\n\t"
		"movel %2@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@,%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"addxl %4,%0\n\t"
		"clrl %1\n\t"
		"addxl %1,%0"
		: "=&d" (sum), "=&d" (tmp)
		: "a" (saddr), "a" (daddr), "d" (len + proto),
		  "0" (sum));

	return csum_fold(sum);
}
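
/*
 * Roughly equivalent C (an illustrative sketch): each 128-bit address
 * is added as four 32-bit words with end-around carry, then len +
 * proto is mixed in and the total is folded, matching the addxl
 * chain above.
 *
 *	u64 acc = (__force u32)sum;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		acc += (__force u32)saddr->s6_addr32[i];
 *	for (i = 0; i < 4; i++)
 *		acc += (__force u32)daddr->s6_addr32[i];
 *	acc += len + proto;
 *	while (acc >> 32)
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);
 *	return csum_fold((__force __wsum)acc);
 */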

#endif /* _M68K_CHECKSUM_H */