// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86/lib/csum-partial_64.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner for speed.
 */

#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>

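/*
 * Fold a 32-bit value into 16 bits with an end-around carry, as the
 * ones' complement checksum requires.  For example, 0x0001ffff becomes
 * 0x0001 + 0xffff = 0x10000, and the wrapped carry gives 0x0001.
 */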
static inline unsigned short from32to16(unsigned int a)
{
	unsigned short b = a >> 16;
	asm("addw %w2,%w0\n\t"
	    "adcw $0,%w0\n"
	    : "=r" (b)
	    : "0" (b), "r" (a));
	return b;
}

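/*
 * Fold the 64-bit accumulator down to a 32-bit __wsum.  If the buffer
 * started on an odd address, the bytes were summed in swapped positions
 * (see csum_partial()), so fold to 16 bits and swap the result back.
 */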
static inline __wsum csum_tail(u64 temp64, int odd)
{
	unsigned int result;

	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
	if (unlikely(odd)) {
		result = from32to16(result);
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
	}
	return (__force __wsum)result;
}

/*
 * Do a checksum on an arbitrary memory area.
 * Returns a 32-bit checksum.
 *
 * This isn't as time critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Still, with CHECKSUM_COMPLETE this is called to compute
 * checksums on IPv6 headers (40 bytes) and other small parts.
 * It's best to have buff aligned on a 64-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
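	/* Accumulate in 64 bits; csum_tail() folds the result back down. */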
	u64 temp64 = (__force u64)sum;
	unsigned int odd;

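	/*
	 * An odd start address shifts every 16-bit word of the buffer by one
	 * byte.  Fold in the first byte at the high position and byte-rotate
	 * the incoming sum so the rest can be summed as if it were 2-byte
	 * aligned; csum_tail() swaps the final result back.
	 */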
	odd = 1 & (unsigned long) buff;
	if (unlikely(odd)) {
		if (unlikely(len == 0))
			return sum;
		temp64 = ror32((__force u32)sum, 8);
		temp64 += (*(unsigned char *)buff << 8);
		len--;
		buff++;
	}

	/*
	 * len == 40 is the hot case due to IPv6 headers, but annotating it
	 * likely() has a noticeable negative effect on codegen for all other
	 * cases with minimal performance benefit here.
	 */
	if (len == 40) {
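		/*
		 * Sum the 40 bytes as five 64-bit adds; the trailing
		 * "adcq $0" folds the final carry back into the result.
		 */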
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq 4*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[40])buff));
		return csum_tail(temp64, odd);
	}
	if (unlikely(len >= 64)) {
		/*
		 * Extra accumulators for better ILP in the loop.
		 */
		u64 tmp_accum, tmp_carries;

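		/*
		 * Each iteration sums 64 bytes as two independent chains of
		 * four quadwords (temp64 and tmp_accum) so the adds can
		 * overlap; the carry out of each chain is collected in
		 * tmp_carries by the "adcl $0" instructions, and the three
		 * accumulators are merged after the loop.
		 */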
		asm("xorl %k[tmp_accum],%k[tmp_accum]\n\t"
		    "xorl %k[tmp_carries],%k[tmp_carries]\n\t"
		    "subl $64, %[len]\n\t"
		    "1:\n\t"
		    "addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcl $0,%k[tmp_carries]\n\t"
		    "addq 4*8(%[src]),%[tmp_accum]\n\t"
		    "adcq 5*8(%[src]),%[tmp_accum]\n\t"
		    "adcq 6*8(%[src]),%[tmp_accum]\n\t"
		    "adcq 7*8(%[src]),%[tmp_accum]\n\t"
		    "adcl $0,%k[tmp_carries]\n\t"
		    "addq $64, %[src]\n\t"
		    "subl $64, %[len]\n\t"
		    "jge 1b\n\t"
		    "addq %[tmp_accum],%[res]\n\t"
		    "adcq %[tmp_carries],%[res]\n\t"
		    "adcq $0,%[res]"
		    : [tmp_accum] "=&r"(tmp_accum),
		      [tmp_carries] "=&r"(tmp_carries), [res] "+r"(temp64),
		      [len] "+r"(len), [src] "+r"(buff)
		    : "m"(*(const char *)buff));
	}

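	/*
	 * Handle the remainder in descending power-of-two chunks.  len may
	 * have gone negative in the 64-byte loop above (it is biased by the
	 * leading "subl $64"), but it still equals the number of remaining
	 * bytes modulo 64, and only bits 0-5 of it are tested here.
	 */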
	if (len & 32) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[32])buff));
		buff += 32;
	}
	if (len & 16) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[16])buff));
		buff += 16;
	}
	if (len & 8) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[8])buff));
		buff += 8;
	}
	if (len & 7) {
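		/*
		 * Sum the last 1-7 bytes.  load_unaligned_zeropad() may read
		 * past the end of the buffer (a faulting read is fixed up to
		 * return zeroes), and the shift pair clears the bytes beyond
		 * len so they do not contribute to the sum.
		 */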
		unsigned int shift = (-len << 3) & 63;
		unsigned long trail;

		trail = (load_unaligned_zeropad(buff) << shift) >> shift;

		asm("addq %[trail],%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [trail] "r"(trail));
	}
	return csum_tail(temp64, odd);
}
EXPORT_SYMBOL(csum_partial);

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
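	/*
	 * csum_partial() returns a 32-bit partial sum; csum_fold() folds it
	 * to 16 bits and complements it to produce the final checksum.
	 */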
	return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);