linux/arch/score/include/asm/checksum.h
#ifndef _ASM_SCORE_CHECKSUM_H
#define _ASM_SCORE_CHECKSUM_H

#include <linux/in6.h>
#include <asm/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int csum_partial(const void *buff, int len, __wsum sum);
unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len,
                                        unsigned int sum, int *csum_err);
unsigned int csum_partial_copy(const char *src, char *dst,
                                        int len, unsigned int sum);

/*
 * csum_partial_copy_from_user() is a version of csum_partial_copy() that
 * records any access fault it hits in *csum_err, but continues and zeroes
 * the rest of the destination buffer.
 */
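
/*
 * Usage sketch, with placeholder names (ubuf, kbuf, n): checksumming a
 * user buffer while copying it into the kernel.
 *
 *        int csum_err = 0;
 *        unsigned int csum;
 *
 *        csum = csum_partial_copy_from_user(ubuf, kbuf, n, 0, &csum_err);
 *        if (csum_err)
 *                return -EFAULT;
 *        // csum may be fed back into csum_partial() for a further fragment,
 *        // or folded with csum_fold() once the whole datagram is summed.
 */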

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
                        __wsum sum, int *err_ptr)
{
        sum = csum_partial(src, len, sum);
        if (copy_to_user(dst, src, len)) {
                *err_ptr = -EFAULT;
                return (__force __wsum) -1; /* invalid checksum */
        }
        return sum;
}
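
/*
 * Usage sketch, with placeholder names (kbuf, ubuf, n): copying a reply out
 * to user space while accumulating its checksum.
 *
 *        int err = 0;
 *        __wsum csum;
 *
 *        csum = csum_and_copy_to_user(kbuf, ubuf, n, 0, &err);
 *        if (err)
 *                return err;    // -EFAULT, the copy did not complete
 *        // csum now holds the running 32-bit sum over the n copied bytes.
 */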


#define csum_partial_copy_nocheck csum_partial_copy
/*
 *      Fold a partial checksum without adding pseudo headers
 */

static inline __sum16 csum_fold(__wsum sum)
{
        /*
         * Two fold passes are always enough: adding the upper and lower
         * halfwords can leave at most one carry to absorb.
         */
        __asm__ __volatile__(
                ".set volatile\n\t"
                ".set\tr1\n\t"
                "slli\tr1,%0, 16\n\t"
                "add\t%0,%0, r1\n\t"
                "cmp.c\tr1, %0\n\t"
                "srli\t%0, %0, 16\n\t"
                "bleu\t1f\n\t"
                "addi\t%0, 0x1\n\t"
                "1:ldi\tr30, 0xffff\n\t"
                "xor\t%0, %0, r30\n\t"
                "slli\t%0, %0, 16\n\t"
                "srli\t%0, %0, 16\n\t"
                ".set\tnor1\n\t"
                ".set optimize\n\t"
                : "=r" (sum)
                : "0" (sum));
        return sum;
}
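
/*
 * For reference, a plain-C rendition of the fold above (roughly the
 * asm-generic version); the helper name csum_fold_c is illustrative only:
 *
 *        static inline __sum16 csum_fold_c(__wsum csum)
 *        {
 *                u32 sum = (__force u32)csum;
 *
 *                sum = (sum & 0xffff) + (sum >> 16);  // fold upper halfword in
 *                sum = (sum & 0xffff) + (sum >> 16);  // absorb the carry
 *                return (__force __sum16)~sum;        // one's complement result
 *        }
 */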

/*
 *      This is a version of ip_compute_csum() optimized for IP headers,
 *      which always checksum on 4 octet boundaries.
 *
 *      By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 *      Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        unsigned int sum;
        unsigned long dummy;

        __asm__ __volatile__(
                ".set volatile\n\t"
                ".set\tnor1\n\t"
                "lw\t%0, [%1]\n\t"
                "subri\t%2, %2, 4\n\t"
                "slli\t%2, %2, 2\n\t"
                "lw\t%3, [%1, 4]\n\t"
                "add\t%2, %2, %1\n\t"
                "add\t%0, %0, %3\n\t"
                "cmp.c\t%3, %0\n\t"
                "lw\t%3, [%1, 8]\n\t"
                "bleu\t1f\n\t"
                "addi\t%0, 0x1\n\t"
                "1:\n\t"
                "add\t%0, %0, %3\n\t"
                "cmp.c\t%3, %0\n\t"
                "lw\t%3, [%1, 12]\n\t"
                "bleu\t1f\n\t"
                "addi\t%0, 0x1\n\t"
                "1:add\t%0, %0, %3\n\t"
                "cmp.c\t%3, %0\n\t"
                "bleu\t1f\n\t"
                "addi\t%0, 0x1\n"

                "1:\tlw\t%3, [%1, 16]\n\t"
                "addi\t%1, 4\n\t"
                "add\t%0, %0, %3\n\t"
                "cmp.c\t%3, %0\n\t"
                "bleu\t2f\n\t"
                "addi\t%0, 0x1\n"
                "2:cmp.c\t%2, %1\n\t"
                "bne\t1b\n\t"

                ".set\tr1\n\t"
                ".set optimize\n\t"
                : "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy)
                : "1" (iph), "2" (ihl));

        return csum_fold(sum);
}
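
/*
 * Usage sketch (skb and iph are placeholders from the caller's context):
 * a received IPv4 header checks out only when the sum over its ihl words
 * comes back as zero.
 *
 *        const struct iphdr *iph = ip_hdr(skb);
 *
 *        if (iph->ihl < 5 || ip_fast_csum(iph, iph->ihl))
 *                goto csum_error;
 */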

static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
                unsigned short proto, __wsum sum)
{
        unsigned long tmp = (ntohs(len) << 16) + proto * 256;
        __asm__ __volatile__(
                ".set volatile\n\t"
                "add\t%0, %0, %2\n\t"
                "cmp.c\t%2, %0\n\t"
                "bleu\t1f\n\t"
                "addi\t%0, 0x1\n\t"
                "1:\n\t"
                "add\t%0, %0, %3\n\t"
                "cmp.c\t%3, %0\n\t"
                "bleu\t1f\n\t"
                "addi\t%0, 0x1\n\t"
                "1:\n\t"
                "add\t%0, %0, %4\n\t"
                "cmp.c\t%4, %0\n\t"
                "bleu\t1f\n\t"
                "addi\t%0, 0x1\n\t"
                "1:\n\t"
                ".set optimize\n\t"
                : "=r" (sum)
                : "0" (daddr), "r"(saddr),
                "r" (tmp),
                "r" (sum));
        return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
                unsigned short proto, __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
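
/*
 * Usage sketch for finishing a UDP checksum (uh, iph and ulen are
 * placeholder names): sum the UDP header and payload with the check field
 * cleared, then mix in the pseudo-header.
 *
 *        uh->check = 0;
 *        uh->check = csum_tcpudp_magic(iph->saddr, iph->daddr, ulen,
 *                                      IPPROTO_UDP,
 *                                      csum_partial(uh, ulen, 0));
 *        if (uh->check == 0)
 *                uh->check = CSUM_MANGLED_0;  // 0 means "no checksum" for UDP
 */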

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static inline unsigned short ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
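
/*
 * Usage sketch (icmph and len are placeholders), in the style of the
 * icmp.c callers mentioned above: clear the checksum field, then sum the
 * whole message.
 *
 *        icmph->checksum = 0;
 *        icmph->checksum = ip_compute_csum(icmph, len);
 */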

#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                                const struct in6_addr *daddr,
                                __u32 len, unsigned short proto,
                                __wsum sum)
{
        __asm__ __volatile__(
                ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t"
                ".set\tnoat\n\t"
                "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t"
                "sltu\t$1, %0, %5\n\t"
                "addu\t%0, $1\n\t"
                "addu\t%0, %6\t\t\t# csum\n\t"
                "sltu\t$1, %0, %6\n\t"
                "lw\t%1, 0(%2)\t\t\t# four words source address\n\t"
                "addu\t%0, $1\n\t"
                "addu\t%0, %1\n\t"
                "sltu\t$1, %0, %1\n\t"
                "lw\t%1, 4(%2)\n\t"
                "addu\t%0, $1\n\t"
                "addu\t%0, %1\n\t"
                "sltu\t$1, %0, %1\n\t"
                "lw\t%1, 8(%2)\n\t"
                "addu\t%0, $1\n\t"
                "addu\t%0, %1\n\t"
                "sltu\t$1, %0, %1\n\t"
                "lw\t%1, 12(%2)\n\t"
                "addu\t%0, $1\n\t"
                "addu\t%0, %1\n\t"
                "sltu\t$1, %0, %1\n\t"
                "lw\t%1, 0(%3)\n\t"
                "addu\t%0, $1\n\t"
                "addu\t%0, %1\n\t"
                "sltu\t$1, %0, %1\n\t"
                "lw\t%1, 4(%3)\n\t"
                "addu\t%0, $1\n\t"
                "addu\t%0, %1\n\t"
                "sltu\t$1, %0, %1\n\t"
                "lw\t%1, 8(%3)\n\t"
                "addu\t%0, $1\n\t"
                "addu\t%0, %1\n\t"
                "sltu\t$1, %0, %1\n\t"
                "lw\t%1, 12(%3)\n\t"
                "addu\t%0, $1\n\t"
                "addu\t%0, %1\n\t"
                "sltu\t$1, %0, %1\n\t"
                "addu\t%0, $1\t\t\t# Add final carry\n\t"
                ".set\tnoat\n\t"
                ".set\tnoreorder"
                : "=r" (sum), "=r" (proto)
                : "r" (saddr), "r" (daddr),
                  "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));

        return csum_fold(sum);
}
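
/*
 * Usage sketch (hdr, icmp6h and len are placeholders): finishing an ICMPv6
 * checksum, which always covers the IPv6 pseudo-header.
 *
 *        icmp6h->icmp6_cksum = 0;
 *        icmp6h->icmp6_cksum = csum_ipv6_magic(&hdr->saddr, &hdr->daddr, len,
 *                                              IPPROTO_ICMPV6,
 *                                              csum_partial(icmp6h, len, 0));
 */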
#endif /* _ASM_SCORE_CHECKSUM_H */