linux/arch/s390/include/asm/checksum.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 fast network checksum routines
 *
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Ulrich Hild        (first version)
 *               Martin Schwidefsky (heavily optimized CKSM version)
 *               D.J. Barrow        (third attempt)
 */

#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H

#include <linux/uaccess.h>
#include <linux/in6.h>

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * Returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic.
 *
 * This function must be called with even lengths, except
 * for the last fragment, which may be odd.
 *
 * It's best to have buff aligned on a 32-bit boundary.
 */
static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	union register_pair rp = {
		.even = (unsigned long) buff,
		.odd = (unsigned long) len,
	};

	asm volatile(
		"0:	cksm	%[sum],%[rp]\n"
		"	jo	0b\n"
		: [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
	return sum;
}
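
/*
 * Illustrative sketch, not part of the original header: the running sum
 * returned by csum_partial() can be fed back in to checksum a buffer that
 * is split across fragments.  The helper name and fragment layout below
 * are hypothetical.
 */
static inline __wsum csum_partial_fragments(const void *frag1, int len1,
					    const void *frag2, int len2)
{
	__wsum sum;

	/* every fragment except the last should have an even length */
	sum = csum_partial(frag1, len1, 0);
	/* the final fragment may be odd-sized */
	return csum_partial(frag2, len2, sum);
}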

/*
 * Fold a partial checksum without adding pseudo headers.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	u32 csum = (__force u32) sum;

	csum += (csum >> 16) | (csum << 16);
	csum >>= 16;
	return (__force __sum16) ~csum;
}
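
/*
 * Worked example (illustration only): folding sum = 0x11112222.
 *
 *	csum  = 0x11112222
 *	csum += (csum >> 16) | (csum << 16)	-> 0x11112222 + 0x22221111
 *						   = 0x33333333
 *	csum >>= 16				-> 0x3333
 *	return ~csum				-> 0xcccc
 *
 * The rotate-and-add folds the high and low halves together with
 * end-around carry, so the upper 16 bits hold the ones' complement sum.
 */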

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksums on 4 octet boundaries.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	__u64 csum = 0;
	__u32 *ptr = (u32 *)iph;

	csum += *ptr++;
	csum += *ptr++;
	csum += *ptr++;
	csum += *ptr++;
	ihl -= 4;
	while (ihl--)
		csum += *ptr++;
	csum += (csum >> 32) | (csum << 32);
	return csum_fold((__force __wsum)(csum >> 32));
}
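
/*
 * Illustrative sketch, not part of the original header: on receive, a
 * valid IPv4 header (with its checksum field filled in) sums to 0, so a
 * hypothetical caller could verify it as follows.
 */
static inline bool example_ipv4_hdr_csum_ok(const void *iph, unsigned int ihl)
{
	return ip_fast_csum(iph, ihl) == 0;
}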

/*
 * Computes the checksum of the TCP/UDP pseudo-header.
 * Returns a 32-bit checksum.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	__u64 csum = (__force __u64)sum;

	csum += (__force __u32)saddr;
	csum += (__force __u32)daddr;
	csum += len;
	csum += proto;
	csum += (csum >> 32) | (csum << 32);
	return (__force __wsum)(csum >> 32);
}

/*
 * Computes the checksum of the TCP/UDP pseudo-header.
 * Returns a 16-bit checksum, already complemented.
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
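
/*
 * Illustrative sketch, not part of the original header: a typical IPv4
 * UDP/TCP checksum combines csum_partial() over the transport header plus
 * payload with the pseudo-header via csum_tcpudp_magic().  The helper
 * name is hypothetical; 17 is IPPROTO_UDP.
 */
static inline __sum16 example_udp4_csum(__be32 saddr, __be32 daddr,
					const void *udp, __u32 len)
{
	__wsum sum = csum_partial(udp, len, 0);

	return csum_tcpudp_magic(saddr, daddr, len, 17 /* IPPROTO_UDP */, sum);
}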

/*
 * Used for miscellaneous IP-like checksums, mainly ICMP.
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
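
/*
 * Illustrative sketch, not part of the original header: as with IPv4
 * headers, a received ICMP message whose checksum field is correct sums
 * to 0 under ip_compute_csum().
 */
static inline bool example_icmp_csum_ok(const void *msg, int len)
{
	return ip_compute_csum(msg, len) == 0;
}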

#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto, __wsum csum)
{
	__u64 sum = (__force __u64)csum;

	sum += (__force __u32)saddr->s6_addr32[0];
	sum += (__force __u32)saddr->s6_addr32[1];
	sum += (__force __u32)saddr->s6_addr32[2];
	sum += (__force __u32)saddr->s6_addr32[3];
	sum += (__force __u32)daddr->s6_addr32[0];
	sum += (__force __u32)daddr->s6_addr32[1];
	sum += (__force __u32)daddr->s6_addr32[2];
	sum += (__force __u32)daddr->s6_addr32[3];
	sum += len;
	sum += proto;
	sum += (sum >> 32) | (sum << 32);
	return csum_fold((__force __wsum)(sum >> 32));
}
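
/*
 * Illustrative sketch, not part of the original header: IPv6 transport
 * checksums follow the same pattern as the IPv4 case above, with
 * csum_ipv6_magic() accumulating the larger IPv6 pseudo-header.  The
 * helper name is hypothetical; 17 is IPPROTO_UDP.
 */
static inline __sum16 example_udp6_csum(const struct in6_addr *saddr,
					const struct in6_addr *daddr,
					const void *udp, __u32 len)
{
	__wsum sum = csum_partial(udp, len, 0);

	return csum_ipv6_magic(saddr, daddr, len, 17 /* IPPROTO_UDP */, sum);
}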

#endif /* _S390_CHECKSUM_H */