/*
 * Derived from crc32c.c version 1.1 by Mark Adler.
 *
 * Copyright (C) 2013 Mark Adler
 *
 * This software is provided 'as-is', without any express or implied warranty.
 * In no event will the author be held liable for any damages arising from the
 * use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 *
 * Mark Adler
 * madler@alumni.caltech.edu
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/libkern/x86/crc32_sse42.c 317149 2017-04-19 16:16:41Z markj $");

/*
 * This file is compiled in userspace in order to run ATF unit tests.
 */
#ifdef USERSPACE_TESTING
#include <stdint.h>
#include <stdlib.h>
#else
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#endif

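/*
 * Inline-asm implementations of the SSE4.2 CRC32 intrinsics, reusing the
 * <nmmintrin.h> names.  Hand-rolled asm keeps the operand constraints
 * under explicit control (see the comment in _mm_crc32_u8 below).
 */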
static __inline uint32_t
_mm_crc32_u8(uint32_t x, uint8_t y)
{
	/*
	 * clang (at least 3.9.[0-1]) pessimizes "rm" (y) and "m" (y)
	 * significantly and "r" (y) a lot by copying y to a different
	 * local variable (on the stack or in a register), so only use
	 * the latter.  This costs a register and an instruction but
	 * not a uop.
	 */
	__asm("crc32b %1,%0" : "+r" (x) : "r" (y));
	return (x);
}

static __inline uint32_t
_mm_crc32_u32(uint32_t x, uint32_t y)
{
	__asm("crc32l %1,%0" : "+r" (x) : "r" (y));
	return (x);
}

static __inline uint64_t
_mm_crc32_u64(uint64_t x, uint64_t y)
{
	__asm("crc32q %1,%0" : "+r" (x) : "r" (y));
	return (x);
}

/* CRC-32C (iSCSI) polynomial in reversed bit order. */
#define POLY	0x82f63b78

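/*
 * For reference, a plain software version of the same per-byte step: one
 * byte of CRC-32C using the reflected polynomial above.  The crc32
 * instruction used in the wrappers computes this without the bit loop.
 * Illustrative only; kept out of the build.
 */
#if 0
static uint32_t
crc32c_sw_u8(uint32_t crc, uint8_t y)
{
	int k;

	crc ^= y;
	for (k = 0; k < 8; k++)
		crc = (crc & 1) ? (crc >> 1) ^ POLY : crc >> 1;
	return (crc);
}
#endif
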
/*
 * Block sizes for three-way parallel crc computation.  LONG and SHORT must
 * both be powers of two.
 */
#define LONG	128
#define SHORT	64

/*
 * Tables for updating a crc for LONG, 2 * LONG, SHORT and 2 * SHORT bytes
 * of value 0 later in the input stream, in the same way that the hardware
 * would, but in software without calculating intermediate steps.
 */
static uint32_t crc32c_long[4][256];
static uint32_t crc32c_2long[4][256];
static uint32_t crc32c_short[4][256];
static uint32_t crc32c_2short[4][256];
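
/*
 * Each 4x256 table of uint32_t above is 4 KB, so the four tables cost
 * 16 KB of statically allocated memory in total.
 */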

/*
 * Multiply a matrix times a vector over the Galois field of two elements,
 * GF(2).  Each element is a bit in an unsigned integer.  mat must have at
 * least as many entries as the power of two for most significant one bit in
 * vec.
 */
static inline uint32_t
gf2_matrix_times(uint32_t *mat, uint32_t vec)
{
	uint32_t sum;

	sum = 0;
	while (vec) {
		if (vec & 1)
			sum ^= *mat;
		vec >>= 1;
		mat++;
	}
	return (sum);
}

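/*
 * Note: each set bit of vec selects one row of mat, and the selected rows
 * are combined with XOR, which is addition in GF(2).  For example, with
 * the 32x32 identity matrix mat[n] == 1 << n, gf2_matrix_times(mat, vec)
 * returns vec unchanged.
 */
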
/*
 * Multiply a matrix by itself over GF(2).  Both mat and square must have 32
 * rows.
 */
static inline void
gf2_matrix_square(uint32_t *square, uint32_t *mat)
{
	int n;

	for (n = 0; n < 32; n++)
		square[n] = gf2_matrix_times(mat, mat[n]);
}

/*
 * Construct an operator to apply len zeros to a crc.  len must be a power of
 * two.  If len is not a power of two, then the result is the same as for the
 * largest power of two less than len.  The result for len == 0 is the same as
 * for len == 1.  A version of this routine could be easily written for any
 * len, but that is not needed for this application.
 */
static void
crc32c_zeros_op(uint32_t *even, size_t len)
{
	uint32_t odd[32];       /* odd-power-of-two zeros operator */
	uint32_t row;
	int n;

	/* put operator for one zero bit in odd */
	odd[0] = POLY;              /* CRC-32C polynomial */
	row = 1;
	for (n = 1; n < 32; n++) {
		odd[n] = row;
		row <<= 1;
	}

	/* put operator for two zero bits in even */
	gf2_matrix_square(even, odd);

	/* put operator for four zero bits in odd */
	gf2_matrix_square(odd, even);

	/*
	 * first square will put the operator for one zero byte (eight zero
	 * bits), in even -- next square puts operator for two zero bytes in
	 * odd, and so on, until len has been rotated down to zero
	 */
	do {
		gf2_matrix_square(even, odd);
		len >>= 1;
		if (len == 0)
			return;
		gf2_matrix_square(odd, even);
		len >>= 1;
	} while (len);

	/* answer ended up in odd -- copy to even */
	for (n = 0; n < 32; n++)
		even[n] = odd[n];
}

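/*
 * Each squaring doubles the number of zeros the operator applies.  E.g.
 * for len == 64 the loop above runs from the one-zero-byte operator
 * through 2, 4, 8, 16, 32 and 64 zero bytes, shifting len right once per
 * square until it reaches zero.
 */
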
/*
 * Take a length and build four lookup tables for applying the zeros operator
 * for that length, byte-by-byte on the operand.
 */
static void
crc32c_zeros(uint32_t zeros[][256], size_t len)
{
	uint32_t op[32];
	uint32_t n;

	crc32c_zeros_op(op, len);
	for (n = 0; n < 256; n++) {
		zeros[0][n] = gf2_matrix_times(op, n);
		zeros[1][n] = gf2_matrix_times(op, n << 8);
		zeros[2][n] = gf2_matrix_times(op, n << 16);
		zeros[3][n] = gf2_matrix_times(op, n << 24);
	}
}

/* Apply the zeros operator table to crc. */
static inline uint32_t
crc32c_shift(uint32_t zeros[][256], uint32_t crc)
{

	return (zeros[0][crc & 0xff] ^ zeros[1][(crc >> 8) & 0xff] ^
	    zeros[2][(crc >> 16) & 0xff] ^ zeros[3][crc >> 24]);
}

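/*
 * Linearity is what makes these tables useful: if crc is the CRC state
 * after some data A, then crc32c_shift(crc32c_long, crc) is the state
 * after A followed by LONG zero bytes, and the state after A || B is that
 * value XORed with the CRC of B computed from a zero initial state.  The
 * three-way loops below combine their partial crcs exactly this way.
 */
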
/* Initialize tables for shifting crcs. */
static void
#ifdef USERSPACE_TESTING
__attribute__((__constructor__))
#endif
crc32c_init_hw(void)
{
	crc32c_zeros(crc32c_long, LONG);
	crc32c_zeros(crc32c_2long, 2 * LONG);
	crc32c_zeros(crc32c_short, SHORT);
	crc32c_zeros(crc32c_2short, 2 * SHORT);
}
#ifdef _KERNEL
SYSINIT(crc32c_sse42, SI_SUB_LOCK, SI_ORDER_ANY, crc32c_init_hw, NULL);
#endif

/* Compute CRC-32C using the Intel hardware instruction. */
#ifdef USERSPACE_TESTING
uint32_t sse42_crc32c(uint32_t, const unsigned char *, unsigned);
#endif
uint32_t
sse42_crc32c(uint32_t crc, const unsigned char *buf, unsigned len)
{
#ifdef __amd64__
	const size_t align = 8;
#else
	const size_t align = 4;
#endif
	const unsigned char *next, *end;
#ifdef __amd64__
	uint64_t crc0, crc1, crc2;
#else
	uint32_t crc0, crc1, crc2;
#endif

	next = buf;
	crc0 = crc;

	/* Compute the crc to bring the data pointer to an aligned boundary. */
	while (len && ((uintptr_t)next & (align - 1)) != 0) {
		crc0 = _mm_crc32_u8(crc0, *next);
		next++;
		len--;
	}

#if LONG > SHORT
	/*
	 * Compute the crc on sets of LONG*3 bytes, executing three independent
	 * crc instructions, each on LONG bytes -- this is optimized for the
	 * Nehalem, Westmere, Sandy Bridge, and Ivy Bridge architectures, which
	 * have a throughput of one crc per cycle, but a latency of three
	 * cycles.
	 */
	crc = 0;
	while (len >= LONG * 3) {
		crc1 = 0;
		crc2 = 0;
		end = next + LONG;
		do {
#ifdef __amd64__
			crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
			crc1 = _mm_crc32_u64(crc1,
			    *(const uint64_t *)(next + LONG));
			crc2 = _mm_crc32_u64(crc2,
			    *(const uint64_t *)(next + (LONG * 2)));
#else
			crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
			crc1 = _mm_crc32_u32(crc1,
			    *(const uint32_t *)(next + LONG));
			crc2 = _mm_crc32_u32(crc2,
			    *(const uint32_t *)(next + (LONG * 2)));
#endif
			next += align;
		} while (next < end);
		/*-
		 * Update the crc.  Try to do it in parallel with the inner
		 * loop.  'crc' is used to accumulate crc0 and crc1
		 * produced by the inner loop so that the next iteration
		 * of the loop doesn't depend on anything except crc2.
		 *
		 * The full expression for the update is:
		 *     crc = S*S*S*crc + S*S*crc0 + S*crc1
		 * where the terms are polynomials modulo the CRC polynomial.
		 * We regroup this subtly as:
		 *     crc = S*S * (S*crc + crc0) + S*crc1.
		 * This has an extra dependency which reduces possible
		 * parallelism for the expression, but it turns out to be
		 * best to intentionally delay evaluation of this expression
		 * so that it competes less with the inner loop.
		 *
		 * We also intentionally reduce parallelism by feeding back
		 * crc2 to the inner loop as crc0 instead of accumulating
		 * it in crc.  This synchronizes the loop with the crc
		 * update.  CPU and/or compiler schedulers produced bad
		 * order without this.
		 *
		 * Shifts take about 12 cycles each, so 3 here with 2
		 * parallelizable take about 24 cycles and the crc update
		 * takes slightly longer.  8 dependent crc32 instructions
		 * can run in 24 cycles, so the 3-way blocking is worse
		 * than useless for sizes less than 8 * <word size> = 64
		 * on amd64.  In practice, SHORT = 32 confirms these
		 * timing calculations by giving a small improvement
		 * starting at size 96.  Then the inner loop takes about
		 * 12 cycles and the crc update about 24, but these are
		 * partly in parallel so the total time is less than the
		 * 36 cycles that 12 dependent crc32 instructions would
		 * take.
		 *
		 * To have a chance of completely hiding the overhead for
		 * the crc update, the inner loop must take considerably
		 * longer than 24 cycles.  LONG = 64 makes the inner loop
		 * take about 24 cycles, so is not quite large enough.
		 * LONG = 128 works OK.  Unhideable overheads are about
		 * 12 cycles per inner loop.  All assuming timing like
		 * Haswell.
		 */
		crc = crc32c_shift(crc32c_long, crc) ^ crc0;
		crc1 = crc32c_shift(crc32c_long, crc1);
		crc = crc32c_shift(crc32c_2long, crc) ^ crc1;
		crc0 = crc2;
		next += LONG * 2;
		len -= LONG * 3;
	}
	crc0 ^= crc;
#endif /* LONG > SHORT */

	/*
	 * Do the same thing, but now on SHORT*3 blocks for the remaining data
	 * less than a LONG*3 block.
	 */
	crc = 0;
	while (len >= SHORT * 3) {
		crc1 = 0;
		crc2 = 0;
		end = next + SHORT;
		do {
#ifdef __amd64__
			crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
			crc1 = _mm_crc32_u64(crc1,
			    *(const uint64_t *)(next + SHORT));
			crc2 = _mm_crc32_u64(crc2,
			    *(const uint64_t *)(next + (SHORT * 2)));
#else
			crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
			crc1 = _mm_crc32_u32(crc1,
			    *(const uint32_t *)(next + SHORT));
			crc2 = _mm_crc32_u32(crc2,
			    *(const uint32_t *)(next + (SHORT * 2)));
#endif
			next += align;
		} while (next < end);
		crc = crc32c_shift(crc32c_short, crc) ^ crc0;
		crc1 = crc32c_shift(crc32c_short, crc1);
		crc = crc32c_shift(crc32c_2short, crc) ^ crc1;
		crc0 = crc2;
		next += SHORT * 2;
		len -= SHORT * 3;
	}
	crc0 ^= crc;

	/* Compute the crc on the remaining bytes at native word size. */
	end = next + (len - (len & (align - 1)));
	while (next < end) {
#ifdef __amd64__
		crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
#else
		crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
#endif
		next += align;
	}
	len &= (align - 1);

	/* Compute the crc for any trailing bytes. */
	while (len) {
		crc0 = _mm_crc32_u8(crc0, *next);
		next++;
		len--;
	}

	return ((uint32_t)crc0);
}
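
/*
 * Usage sketch (userspace, illustrative only; kept out of the build).
 * CRC-32C as used by iSCSI inverts the crc on entry and exit; the expected
 * value is the standard RFC 3720 test vector for 32 zero bytes.
 */
#if 0
#include <assert.h>

int
main(void)
{
	static const unsigned char buf[32];	/* 32 bytes of zeros */
	uint32_t crc;

	crc = sse42_crc32c(~0U, buf, sizeof(buf)) ^ ~0U;
	assert(crc == 0x8a9136aa);
	return (0);
}
#endif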