 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * Copyright 2012 Xyratex Technology Limited
 * Using the hardware-provided PCLMULQDQ instruction to accelerate CRC32
 * computation.
 *
 * CRC32 polynomial: 0x04c11db7 (BE) / 0xEDB88320 (LE)
 *
 * PCLMULQDQ is a carry-less multiplication instruction from Intel's CLMUL
 * extension; the instruction reference can be found at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *          Alexander Boyko <Alexander_Boyko@xyratex.com>
/* gcc 4.1.2 does not support the pclmulqdq instruction;
 * use the macro definitions from Linux kernel 2.6.38 instead. */
#define REG_NUM_INVALID 100

.macro XMM_NUM opd xmm
        \opd = REG_NUM_INVALID
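        /* In the full macro (not shown here), the body continues with a chain
         * of .ifc compares that matches \xmm against %xmm0..%xmm15 and stores
         * the register number in \opd; REG_NUM_INVALID marks a non-XMM
         * operand. */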
.macro PFX_REX opd1 opd2 W=0
        .if ((\opd1 | \opd2) & 8) || \W
        .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)

.macro MODRM mod opd1 opd2
        .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)

.macro PCLMULQDQ imm8 xmm1 xmm2
        XMM_NUM clmul_opd1 \xmm1
        XMM_NUM clmul_opd2 \xmm2
        PFX_REX clmul_opd1 clmul_opd2
        .byte 0x0f, 0x3a, 0x44
        MODRM 0xc0 clmul_opd1 clmul_opd2
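/* Worked example (a sketch of how the macro expands; the 0x66 operand-size
 * prefix and the trailing immediate byte emitted by the full macro are elided
 * above): "PCLMULQDQ 0x00, CONSTANT, %xmm1" resolves clmul_opd1 = 0 (%xmm0)
 * and clmul_opd2 = 1 (%xmm1). Neither register number has bit 3 set and W = 0,
 * so PFX_REX emits no REX byte, and MODRM yields
 * 0xc0 | (0 & 7) | ((1 & 7) << 3) = 0xc8; together these bytes encode
 * pclmulqdq $0x00, %xmm0, %xmm1. */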
/*
 * [(x^(4*128+32) mod P(x)) << 32]' << 1 = 0x154442bd4
 * #define CONSTANT_R1 0x154442bd4LL
 *
 * [(x^(4*128-32) mod P(x)) << 32]' << 1 = 0x1c6e41596
 * #define CONSTANT_R2 0x1c6e41596LL
 */
.Lconstant_R2R1:
        .octa 0x00000001c6e415960000000154442bd4
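/* Layout note: the low 64 bits of this .octa hold R1 and the high 64 bits
 * hold R2, so a PCLMULQDQ with immediate 0x00 below multiplies data.low by R1
 * while immediate 0x11 multiplies data.high by R2. */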
/*
 * [(x^(128+32) mod P(x)) << 32]' << 1 = 0x1751997d0
 * #define CONSTANT_R3 0x1751997d0LL
 *
 * [(x^(128-32) mod P(x)) << 32]' << 1 = 0x0ccaa009e
 * #define CONSTANT_R4 0x0ccaa009eLL
 */
.Lconstant_R4R3:
        .octa 0x00000000ccaa009e00000001751997d0

/*
 * [(x^64 mod P(x)) << 32]' << 1 = 0x163cd6124
 * #define CONSTANT_R5 0x163cd6124LL
 */
.Lconstant_R5:
        .octa 0x00000000000000000000000163cd6124

.Lconstant_mask32:
        .octa 0x000000000000000000000000FFFFFFFF
/*
 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
 *
 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` = 0x1F7011641LL
 * #define CONSTANT_RU 0x1F7011641LL
 */
.Lconstant_RUpoly:
        .octa 0x00000001F701164100000001DB710641
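/* How constants like these are conventionally derived (a rough C sketch, not
 * part of this file; xpow_mod_p is a hypothetical helper): compute
 * x^n mod P(x) over GF(2), then apply the << 32, bit-reflection (') and << 1
 * steps noted in the comments above.
 *
 *   static uint32_t xpow_mod_p(unsigned int n)
 *   {
 *           uint32_t r = 1;              // x^0
 *           while (n--)                  // multiply by x, reduce mod P(x)
 *                   r = (r << 1) ^ ((r & 0x80000000u) ? 0x04C11DB7u : 0);
 *           return r;
 *   }
 */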
#define CONSTANT %xmm0

#warning Using 32bit code support
 * BUF - buffer pointer (16-byte aligned)
 * LEN - buffer size in bytes (a multiple of 16), must be greater than 63
 * CRC - initial crc32 value
 *
 * uint crc32_pclmul_le_16(unsigned char const *buffer,
 *                         size_t len, uint crc32)
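 *
 * A caller is expected to pass only the aligned, 16-byte-multiple body of the
 * input here and to finish any remaining tail separately, e.g. (hypothetical
 * glue code, not part of this file; crc32_le is the kernel's generic helper):
 *
 *   size_t body = len & ~(size_t)15;               // multiple of 16, assumed >= 64
 *   crc = crc32_pclmul_le_16(buf, body, crc);
 *   crc = crc32_le(crc, buf + body, len - body);   // finish the tail in C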
.globl crc32_pclmul_le_16
crc32_pclmul_le_16:/* buffer and buffer size are 16 bytes aligned */
        movdqa 0x10(BUF), %xmm2
        movdqa 0x20(BUF), %xmm3
        movdqa 0x30(BUF), %xmm4
        /* This is for position-independent code (-fPIC) support on 32-bit */
        movdqa .Lconstant_R2R1(%rip), CONSTANT
        movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT
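        /* Each pass of loop_64 below folds 64 input bytes into the four
         * 128-bit accumulators xmm1-xmm4: every accumulator is replaced by
         * (acc.low64 * R1) xor (acc.high64 * R2) xor (16 new input bytes),
         * where * is the 64x64 -> 128-bit carry-less multiply (PCLMULQDQ). */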
loop_64:/* 64-byte (full cache line) folding */
        prefetchnta 0x40(BUF)
        PCLMULQDQ 00, CONSTANT, %xmm1
        PCLMULQDQ 00, CONSTANT, %xmm2
        PCLMULQDQ 00, CONSTANT, %xmm3
        PCLMULQDQ 00, CONSTANT, %xmm4
        PCLMULQDQ 0x11, CONSTANT, %xmm5
        PCLMULQDQ 0x11, CONSTANT, %xmm6
        PCLMULQDQ 0x11, CONSTANT, %xmm7
        PCLMULQDQ 0x11, CONSTANT, %xmm8
        /* xmm8 unsupported for x32 */
        PCLMULQDQ 00, CONSTANT, %xmm4
        PCLMULQDQ 0x11, CONSTANT, %xmm5
        pxor 0x10(BUF), %xmm2
        pxor 0x20(BUF), %xmm3
        pxor 0x30(BUF), %xmm4
less_64:/* Fold the cache line down to 128 bits */
        movdqa .Lconstant_R4R3(%rip), CONSTANT
        movdqa .Lconstant_R4R3 - delta(%ecx), CONSTANT

        PCLMULQDQ 0x00, CONSTANT, %xmm1
        PCLMULQDQ 0x11, CONSTANT, %xmm5

        PCLMULQDQ 0x00, CONSTANT, %xmm1
        PCLMULQDQ 0x11, CONSTANT, %xmm5

        PCLMULQDQ 0x00, CONSTANT, %xmm1
        PCLMULQDQ 0x11, CONSTANT, %xmm5
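        /* At this point xmm2, xmm3 and xmm4 have been folded into xmm1 with
         * the R4:R3 constants, leaving a single 128-bit remainder. */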
loop_16:/* Fold the rest of the buffer into 128 bits */
        PCLMULQDQ 0x00, CONSTANT, %xmm1
        PCLMULQDQ 0x11, CONSTANT, %xmm5
        /* perform the last 64-bit fold; this also appends 32 zero bits
         * to the input stream */
        PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
        /* final 32-bit fold */
        movdqa .Lconstant_R5(%rip), CONSTANT
        movdqa .Lconstant_mask32(%rip), %xmm3
        movdqa .Lconstant_R5 - delta(%ecx), CONSTANT
        movdqa .Lconstant_mask32 - delta(%ecx), %xmm3

        PCLMULQDQ 0x00, CONSTANT, %xmm1
        /* Finish up with the bit-reversed Barrett reduction, 64 -> 32 bits */
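        /* Sketch of the reduction on bit-reflected values, assuming R is the
         * 64-bit remainder held in xmm1: T1 = (R mod x^32) * u` (immediate
         * 0x10 selects the constant's high qword), T2 = (T1 mod x^32) * P(x)`,
         * and the final CRC is the upper 32 bits of R xor T2, which is why
         * dword 1 of xmm1 is extracted at the end. */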
        movdqa .Lconstant_RUpoly(%rip), CONSTANT
        movdqa .Lconstant_RUpoly - delta(%ecx), CONSTANT

        PCLMULQDQ 0x10, CONSTANT, %xmm1

        PCLMULQDQ 0x00, CONSTANT, %xmm1

        pextrd $0x01, %xmm1, %eax