/*
 * Using the hardware-provided CRC32 instruction to accelerate CRC32C
 * computation.
 * CRC32C polynomial: 0x1EDC6F41 (BE) / 0x82F63B78 (LE)
 * CRC32 is a new instruction in Intel SSE4.2; the reference can be found at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2A: Instruction Set Reference, A-M
 *
 * Copyright (C) 2008 Intel Corporation
 * Authors: Austin Zhang <austin_zhang@linux.intel.com>
 *          Kent Liu <kent.liu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <crypto/internal/hash.h>

#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/internal.h>

#define CHKSUM_BLOCK_SIZE	1
#define CHKSUM_DIGEST_SIZE	4

#define SCALE_F	sizeof(unsigned long)

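/*
 * On x86_64, prefix the hand-encoded CRC32 opcode below with REX.W (0x48)
 * so the instruction operates on 64-bit registers.
 */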
#ifdef CONFIG_X86_64
#define REX_PRE "0x48, "
#else
#define REX_PRE
#endif

#ifdef CONFIG_X86_64
/*
 * Use the carry-less multiply version of crc32c when the buffer size is
 * >= 512 bytes, to account for the FPU state save/restore overhead.
 */
#define CRC32C_PCL_BREAKEVEN	512

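/*
 * PCLMULQDQ-based CRC32C routine; implemented in the accompanying assembly
 * file (crc32c-pcl-intel-asm_64.S in the upstream kernel).
 */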
asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
				unsigned int crc_init);
#endif /* CONFIG_X86_64 */

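/*
 * Process the input one byte at a time.  The .byte sequence encodes
 * crc32b %cl, %esi; raw opcode bytes are used so this builds even with
 * assemblers that do not know the SSE4.2 crc32 mnemonic.
 */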
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
{
	while (length--) {
		__asm__ __volatile__(
			".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1"
			:"=S" (crc)
			:"0" (crc), "c" (*data)
		);
		data++;
	}

	return crc;
}

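/*
 * Process the bulk of the buffer one unsigned long at a time (the .byte
 * sequence encodes crc32 on a 32-bit or, with REX_PRE, 64-bit operand),
 * then hand any trailing bytes to the byte-wise helper above.
 */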
static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len)
{
	unsigned int iquotient = len / SCALE_F;
	unsigned int iremainder = len % SCALE_F;
	unsigned long *ptmp = (unsigned long *)p;

	while (iquotient--) {
		__asm__ __volatile__(
			".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;"
			:"=S" (crc)
			:"0" (crc), "c" (*ptmp)
		);
		ptmp++;
	}

	if (iremainder)
		crc = crc32c_intel_le_hw_byte(crc, (unsigned char *)ptmp,
					      iremainder);

	return crc;
}

/*
 * Setting the seed allows arbitrary accumulators and flexible XOR policy.
 * If your algorithm starts with ~0, then XOR with ~0 before you set the
 * seed.
 */
static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key,
			       unsigned int keylen)
{
	u32 *mctx = crypto_shash_ctx(hash);

	if (keylen != sizeof(u32)) {
		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	*mctx = le32_to_cpup((__le32 *)key);
	return 0;
}

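/* Start a new digest from the seed stored in the tfm context. */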
static int crc32c_intel_init(struct shash_desc *desc)
{
	u32 *mctx = crypto_shash_ctx(desc->tfm);
	u32 *crcp = shash_desc_ctx(desc);

	*crcp = *mctx;

	return 0;
}

static int crc32c_intel_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	u32 *crcp = shash_desc_ctx(desc);

	*crcp = crc32c_intel_le_hw(*crcp, data, len);
	return 0;
}

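/* Fold in any remaining data and emit the bit-inverted digest as __le32. */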
static int __crc32c_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
				u8 *out)
{
	*(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
	return 0;
}

static int crc32c_intel_finup(struct shash_desc *desc, const u8 *data,
			      unsigned int len, u8 *out)
{
	return __crc32c_intel_finup(shash_desc_ctx(desc), data, len, out);
}

static int crc32c_intel_final(struct shash_desc *desc, u8 *out)
{
	u32 *crcp = shash_desc_ctx(desc);

	*(__le32 *)out = ~cpu_to_le32p(crcp);
	return 0;
}

static int crc32c_intel_digest(struct shash_desc *desc, const u8 *data,
			       unsigned int len, u8 *out)
{
	return __crc32c_intel_finup(crypto_shash_ctx(desc->tfm), data, len,
				    out);
}

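/*
 * The default seed is ~0, the conventional CRC32C initial value;
 * setkey() can override it.
 */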
static int crc32c_intel_cra_init(struct crypto_tfm *tfm)
{
	u32 *key = crypto_tfm_ctx(tfm);

	*key = ~0;

	return 0;
}

#ifdef CONFIG_X86_64
static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
				   unsigned int len)
{
	u32 *crcp = shash_desc_ctx(desc);

	/*
	 * Use the faster PCL version if the data size is large enough to
	 * overcome the kernel FPU state save/restore overhead.
	 */
	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
		kernel_fpu_begin();
		*crcp = crc_pcl(data, len, *crcp);
		kernel_fpu_end();
	} else
		*crcp = crc32c_intel_le_hw(*crcp, data, len);
	return 0;
}

static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
				    u8 *out)
{
	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
		kernel_fpu_begin();
		*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
		kernel_fpu_end();
	} else
		*(__le32 *)out =
			~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
	return 0;
}

static int crc32c_pcl_intel_finup(struct shash_desc *desc, const u8 *data,
				  unsigned int len, u8 *out)
{
	return __crc32c_pcl_intel_finup(shash_desc_ctx(desc), data, len, out);
}

static int crc32c_pcl_intel_digest(struct shash_desc *desc, const u8 *data,
				   unsigned int len, u8 *out)
{
	return __crc32c_pcl_intel_finup(crypto_shash_ctx(desc->tfm), data, len,
					out);
}
#endif /* CONFIG_X86_64 */

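/*
 * Plain SSE4.2 implementation by default; on x86_64 with PCLMULQDQ the
 * update/finup/digest hooks are replaced with the PCL variants at init.
 */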
static struct shash_alg alg = {
	.setkey			= crc32c_intel_setkey,
	.init			= crc32c_intel_init,
	.update			= crc32c_intel_update,
	.final			= crc32c_intel_final,
	.finup			= crc32c_intel_finup,
	.digest			= crc32c_intel_digest,
	.descsize		= sizeof(u32),
	.digestsize		= CHKSUM_DIGEST_SIZE,
	.base			= {
		.cra_name		= "crc32c",
		.cra_driver_name	= "crc32c-intel",
		.cra_priority		= 200,
		.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(u32),
		.cra_module		= THIS_MODULE,
		.cra_init		= crc32c_intel_cra_init,
	}
};

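/* Only load on CPUs advertising SSE4.2, which provides the CRC32 instruction. */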
static const struct x86_cpu_id crc32c_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XMM4_2),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id);

static int __init crc32c_intel_mod_init(void)
{
	if (!x86_match_cpu(crc32c_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
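	/*
	 * With PCLMULQDQ available, switch to the carry-less multiply
	 * implementation for buffers of CRC32C_PCL_BREAKEVEN bytes or more.
	 */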
	if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
		alg.update = crc32c_pcl_intel_update;
		alg.finup = crc32c_pcl_intel_finup;
		alg.digest = crc32c_pcl_intel_digest;
	}
#endif
	return crypto_register_shash(&alg);
}

static void __exit crc32c_intel_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(crc32c_intel_mod_init);
module_exit(crc32c_intel_mod_fini);

MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
MODULE_LICENSE("GPL");

MODULE_ALIAS_CRYPTO("crc32c");
MODULE_ALIAS_CRYPTO("crc32c-intel");