linux/arch/sparc/include/asm/checksum_32.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H

/*  checksum.h:  IP/UDP/TCP checksum routines on the Sparc.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996 David S. Miller
 *  Copyright(C) 1996 Eddie C. Dost
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *      Alpha checksum c-code
 *      ix86 inline assembly
 *      RFC1071 Computing the Internet Checksum
 */

#include <linux/in6.h>
#include <linux/uaccess.h>

/* computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

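/* A minimal usage sketch (illustrative only; frag1/frag2 and their lengths
 * are hypothetical buffers): the running 32-bit sum is fed back into
 * csum_partial across fragments and folded down to 16 bits at the end;
 * only the last fragment may have an odd length.
 *
 *      __wsum sum = csum_partial(frag1, len1, 0);
 *      sum = csum_partial(frag2, len2, sum);
 *      __sum16 check = csum_fold(sum);
 */
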
/* the same as csum_partial, but copies from src while it checksums
 * (src may be user memory in the *_from_user variant below)
 *
 * here it is even more important to align src and dst on a 32-bit (or
 * even better 64-bit) boundary
 */

unsigned int __csum_partial_copy_sparc_generic(const unsigned char *, unsigned char *);

/* Kernel-to-kernel copy + checksum: no fault handling is needed, so the
 * helper is called directly with the initial sum passed in %g7.
 */
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
        register unsigned int ret asm("o0") = (unsigned int)src;
        register char *d asm("o1") = dst;
        register int l asm("g1") = len;

        __asm__ __volatile__ (
                "call __csum_partial_copy_sparc_generic\n\t"
                " mov %6, %%g7\n"
        : "=&r" (ret), "=&r" (d), "=&r" (l)
        : "0" (ret), "1" (d), "2" (l), "r" (sum)
        : "o2", "o3", "o4", "o5", "o7",
          "g2", "g3", "g4", "g5", "g7",
          "memory", "cc");
        return (__force __wsum)ret;
}

static inline __wsum
csum_and_copy_from_user(const void __user *src, void *dst, int len,
                            __wsum sum, int *err)
{
        register unsigned long ret asm("o0") = (unsigned long)src;
        register char *d asm("o1") = dst;
        register int l asm("g1") = len;
        register __wsum s asm("g7") = sum;

        if (unlikely(!access_ok(src, len))) {
                if (len)
                        *err = -EFAULT;
                return sum;
        }

        __asm__ __volatile__ (
        ".section __ex_table,#alloc\n\t"
        ".align 4\n\t"
        ".word 1f,2\n\t"
        ".previous\n"
        "1:\n\t"
        "call __csum_partial_copy_sparc_generic\n\t"
        " st %8, [%%sp + 64]\n"
        : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
        : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
        : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
          "cc", "memory");
        return (__force __wsum)ret;
}

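/* A sketch of the calling convention (illustrative only; uaddr and kbuf
 * are hypothetical locals): a fault during the copy, or a failed
 * access_ok(), is reported through *err rather than the return value.
 *
 *      int err = 0;
 *      __wsum csum = csum_and_copy_from_user(uaddr, kbuf, len, 0, &err);
 *      if (err)
 *              return -EFAULT;
 */
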
#define HAVE_CSUM_COPY_USER

static inline __wsum
csum_and_copy_to_user(const void *src, void __user *dst, int len,
                          __wsum sum, int *err)
{
        if (!access_ok(dst, len)) {
                *err = -EFAULT;
                return sum;
        } else {
                register unsigned long ret asm("o0") = (unsigned long)src;
                register char __user *d asm("o1") = dst;
                register int l asm("g1") = len;
                register __wsum s asm("g7") = sum;

                __asm__ __volatile__ (
                ".section __ex_table,#alloc\n\t"
                ".align 4\n\t"
                ".word 1f,1\n\t"
                ".previous\n"
                "1:\n\t"
                "call __csum_partial_copy_sparc_generic\n\t"
                " st %8, [%%sp + 64]\n"
                : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
                : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
                : "o2", "o3", "o4", "o5", "o7",
                  "g2", "g3", "g4", "g5",
                  "cc", "memory");
                return (__force __wsum)ret;
        }
}

/* ihl is always 5 or greater; it is almost always 5, and iph is word
 * aligned the majority of the time.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        __sum16 sum;

        /* Note: We must read %2 before we touch %0 for the first time,
         *       because GCC can legitimately use the same register for
         *       both operands.
         */
        __asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
                             "ld\t[%1 + 0x00], %0\n\t"
                             "ld\t[%1 + 0x04], %%g2\n\t"
                             "ld\t[%1 + 0x08], %%g3\n\t"
                             "addcc\t%%g2, %0, %0\n\t"
                             "addxcc\t%%g3, %0, %0\n\t"
                             "ld\t[%1 + 0x0c], %%g2\n\t"
                             "ld\t[%1 + 0x10], %%g3\n\t"
                             "addxcc\t%%g2, %0, %0\n\t"
                             "addx\t%0, %%g0, %0\n"
                             "1:\taddcc\t%%g3, %0, %0\n\t"
                             "add\t%1, 4, %1\n\t"
                             "addxcc\t%0, %%g0, %0\n\t"
                             "subcc\t%%g4, 1, %%g4\n\t"
                             "be,a\t2f\n\t"
                             "sll\t%0, 16, %%g2\n\t"
                             "b\t1b\n\t"
                             "ld\t[%1 + 0x10], %%g3\n"
                             "2:\taddcc\t%0, %%g2, %%g2\n\t"
                             "srl\t%%g2, 16, %0\n\t"
                             "addx\t%0, %%g0, %0\n\t"
                             "xnor\t%%g0, %0, %0"
                             : "=r" (sum), "=&r" (iph)
                             : "r" (ihl), "1" (iph)
                             : "g2", "g3", "g4", "cc", "memory");
        return sum;
}

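/* Typical receive-path check (a sketch; iph is a hypothetical
 * struct iphdr pointer): summing a valid header, including its
 * checksum field, must yield 0.
 *
 *      if (ip_fast_csum(iph, iph->ihl))
 *              goto drop;
 */
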
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
        unsigned int tmp;

        __asm__ __volatile__("addcc\t%0, %1, %1\n\t"
                             "srl\t%1, 16, %1\n\t"
                             "addx\t%1, %%g0, %1\n\t"
                             "xnor\t%%g0, %1, %0"
                             : "=&r" (sum), "=r" (tmp)
                             : "0" (sum), "1" ((__force u32)sum<<16)
                             : "cc");
        return (__force __sum16)sum;
}

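/* Worked example (arithmetic only): folding sum = 0x2a1b3c4d adds the two
 * halves, 0x2a1b + 0x3c4d = 0x6668, then complements the 16-bit result,
 * so csum_fold() returns 0x9997; a carry out of bit 15 would be wrapped
 * back in first (end-around carry).
 */
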
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                                        __u32 len, __u8 proto,
                                        __wsum sum)
{
        __asm__ __volatile__("addcc\t%1, %0, %0\n\t"
                             "addxcc\t%2, %0, %0\n\t"
                             "addxcc\t%3, %0, %0\n\t"
                             "addx\t%0, %%g0, %0\n\t"
                             : "=r" (sum), "=r" (saddr)
                             : "r" (daddr), "r" (proto + len), "0" (sum),
                               "1" (saddr)
                             : "cc");
        return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * and returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
                                        __u32 len, __u8 proto,
                                        __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

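/* A UDP-style sketch (illustrative; uh, saddr, daddr and len are
 * hypothetical locals): checksum the datagram, then mix in the
 * pseudo-header and fold. A computed 0 must be sent as all-ones,
 * since 0 means "no checksum" on the wire.
 *
 *      __wsum csum = csum_partial(uh, len, 0);
 *      uh->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
 *      if (uh->check == 0)
 *              uh->check = CSUM_MANGLED_0;
 */
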
#define _HAVE_ARCH_IPV6_CSUM

static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                                      const struct in6_addr *daddr,
                                      __u32 len, __u8 proto, __wsum sum)
{
        __asm__ __volatile__ (
                "addcc  %3, %4, %%g4\n\t"
                "addxcc %5, %%g4, %%g4\n\t"
                "ld     [%2 + 0x0c], %%g2\n\t"
                "ld     [%2 + 0x08], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%2 + 0x04], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%2 + 0x00], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%1 + 0x0c], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%1 + 0x08], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%1 + 0x04], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%1 + 0x00], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "addxcc %%g3, %%g4, %0\n\t"
                "addx   0, %0, %0\n"
                : "=&r" (sum)
                : "r" (saddr), "r" (daddr),
                  "r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
                : "g2", "g3", "g4", "cc");

        return csum_fold(sum);
}

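/* Same pattern for IPv6 (a sketch; hdr and icmp6h are hypothetical
 * locals):
 *
 *      __wsum csum = csum_partial(icmp6h, len, 0);
 *      icmp6h->icmp6_cksum = csum_ipv6_magic(&hdr->saddr, &hdr->daddr,
 *                                            len, IPPROTO_ICMPV6, csum);
 */
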
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}

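/* E.g. for ICMP (a sketch; icmph is a hypothetical struct icmphdr
 * pointer): zero the field first so it does not contribute to the sum.
 *
 *      icmph->checksum = 0;
 *      icmph->checksum = ip_compute_csum(icmph, len);
 */
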
#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
        __asm__ __volatile__(
                "addcc   %0, %1, %0\n"
                "addx    %0, %%g0, %0"
                : "=r" (csum)
                : "r" (addend), "0" (csum)
                : "cc");

        return csum;
}

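/* One's-complement addition with end-around carry, e.g. for combining
 * csum_partial() results over adjacent blocks: 0xffff0000 + 0x00010000
 * overflows, and the carry is added back in, so
 * csum_add(0xffff0000, 0x00010000) == 0x00000001.
 */
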
#endif /* !(__SPARC_CHECKSUM_H) */