/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  arch/arm/include/asm/checksum.h
 *
 * IP checksum routines
 *
 * Copyright (C) Original authors of ../asm-i386/checksum.h
 * Copyright (C) 1996-1999 Russell King
 */
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

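/*
 * Illustrative sketch only, not part of this header: a portable C
 * rendering of the value csum_partial() computes (RFC 1071 style).
 * The helper name is hypothetical, and buff is assumed to be at
 * least 16-bit aligned.
 *
 *      static __wsum example_csum_partial(const void *buff, int len, __wsum sum)
 *      {
 *              const u16 *p = buff;
 *              u64 acc = (__force u32)sum;
 *
 *              while (len > 1) {               // sum the block as 16-bit words
 *                      acc += *p++;
 *                      len -= 2;
 *              }
 *              if (len)                        // odd trailing byte, if any
 *                      acc += *(const u8 *)p;
 *              while (acc >> 32)               // fold carries back into 32 bits
 *                      acc = (acc & 0xffffffff) + (acc >> 32);
 *              return (__force __wsum)acc;
 *      }
 */
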
/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly,
 * when needed.
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or, better still, a 64-bit) boundary
 */

__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len);

__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
#define _HAVE_ARCH_CSUM_AND_COPY
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
        if (!access_ok(src, len))
                return 0;       /* 0 tells the caller the copy faulted */

        return csum_partial_copy_from_user(src, dst, len);
}

/*
 *      Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
        __asm__(
        "add    %0, %1, %1, ror #16     @ csum_fold"
        : "=r" (sum)
        : "r" (sum)
        : "cc");
        return (__force __sum16)(~(__force u32)sum >> 16);
}

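/*
 * Why the single rotate-and-add works, as a hedged portable sketch
 * (helper name hypothetical): adding sum to itself rotated by 16 bits
 * leaves the one's-complement fold of the two 16-bit halves, with the
 * end-around carry already applied, in the top half of the register.
 *
 *      static __sum16 example_csum_fold(__wsum wsum)
 *      {
 *              u32 sum = (__force u32)wsum;
 *
 *              sum += ror32(sum, 16);  // top half: low16 + high16 + carry
 *              return (__force __sum16)(~sum >> 16);
 *      }
 */
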
/*
 *      This is a version of ip_compute_csum() optimized for IP headers,
 *      which always checksum on 4 octet boundaries.
 */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
        unsigned int tmp1;
        __wsum sum;

        __asm__ __volatile__(
        "ldr    %0, [%1], #4            @ ip_fast_csum          \n\
        ldr     %3, [%1], #4                                    \n\
        sub     %2, %2, #5                                      \n\
        adds    %0, %0, %3                                      \n\
        ldr     %3, [%1], #4                                    \n\
        adcs    %0, %0, %3                                      \n\
        ldr     %3, [%1], #4                                    \n\
1:      adcs    %0, %0, %3                                      \n\
        ldr     %3, [%1], #4                                    \n\
        tst     %2, #15                 @ do this carefully     \n\
        subne   %2, %2, #1              @ without destroying    \n\
        bne     1b                      @ the carry flag        \n\
        adcs    %0, %0, %3                                      \n\
        adc     %0, %0, #0"
        : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
        : "1" (iph), "2" (ihl)
        : "cc", "memory");
        return csum_fold(sum);
}

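/*
 * Typical use, mirroring how net/ipv4 validates a received header.
 * ihl counts 32-bit words and must be at least 5; a correct IPv4
 * header checksums to zero, so any non-zero fold means corruption.
 * (The skb/iph variables are for illustration.)
 *
 *      const struct iphdr *iph = ip_hdr(skb);
 *
 *      if (ip_fast_csum(iph, iph->ihl))
 *              goto csum_error;
 */
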
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
                   __u8 proto, __wsum sum)
{
        u32 lenprot = len + proto;

        if (__builtin_constant_p(sum) && sum == 0) {
                __asm__(
                "adds   %0, %1, %2      @ csum_tcpudp_nofold0   \n\t"
#ifdef __ARMEB__
                "adcs   %0, %0, %3                              \n\t"
#else
                /* LE: ror #8 byte-swaps lenprot once the sum is folded to 16 bits */
                "adcs   %0, %0, %3, ror #8                      \n\t"
#endif
                "adc    %0, %0, #0"
                : "=&r" (sum)
                : "r" (daddr), "r" (saddr), "r" (lenprot)
                : "cc");
        } else {
                __asm__(
                "adds   %0, %1, %2      @ csum_tcpudp_nofold    \n\t"
                "adcs   %0, %0, %3                              \n\t"
#ifdef __ARMEB__
                "adcs   %0, %0, %4                              \n\t"
#else
                "adcs   %0, %0, %4, ror #8                      \n\t"
#endif
                "adc    %0, %0, #0"
                : "=&r" (sum)
                : "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
                : "cc");
        }
        return sum;
}

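/*
 * A portable sketch of the same pseudo-header sum, in the style of the
 * generic lib/checksum.c version (helper name hypothetical).  len and
 * proto are host-order while the running sum holds data as loaded from
 * the packet, so little-endian must reposition the bytes: shifting the
 * 16-bit field left by 8 (or the ror #8 above) makes it fold to its
 * byte-swapped form.
 *
 *      static __wsum example_nofold(__be32 saddr, __be32 daddr, __u32 len,
 *                                   __u8 proto, __wsum sum)
 *      {
 *              u64 s = (__force u32)sum;
 *
 *              s += (__force u32)saddr;
 *              s += (__force u32)daddr;
 *      #ifdef __ARMEB__
 *              s += proto + len;
 *      #else
 *              s += (proto + len) << 8;
 *      #endif
 *              s += s >> 32;           // fold carries back into 32 bits
 *              return (__force __wsum)s;
 *      }
 */
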
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
                  __u8 proto, __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
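
/*
 * Typical transmit-side use for UDP (variable names illustrative),
 * assuming uh->check was zeroed before summing the datagram.  UDP
 * reserves 0 on the wire for "no checksum", hence the fixup.
 *
 *      uh->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP,
 *                                    csum_partial(uh, len, 0));
 *      if (uh->check == 0)
 *              uh->check = CSUM_MANGLED_0;
 */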

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16
ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}

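/*
 * Typical use for ICMP (as in net/ipv4/icmp.c): zero the checksum
 * field first so it does not contribute to the sum.
 *
 *      icmph->checksum = 0;
 *      icmph->checksum = ip_compute_csum(icmph, len);
 */
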
#define _HAVE_ARCH_IPV6_CSUM
extern __wsum
__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
                __be32 len, __be32 proto, __wsum sum);

static inline __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
                __u32 len, __u8 proto, __wsum sum)
{
        return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
                                           htonl(proto), sum));
}
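
/*
 * Typical use for TCP over IPv6 (variable names illustrative); the
 * tcp_v6_check() helper in include/net/ip6_checksum.h wraps exactly
 * this call:
 *
 *      th->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, len,
 *                                  IPPROTO_TCP, csum_partial(th, len, 0));
 */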
#endif /* __ASM_ARM_CHECKSUM_H */