1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Cryptographic API. |
4 | * |
5 | * Support for VIA PadLock hardware crypto engine. |
6 | * |
7 | * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> |
8 | * |
9 | */ |
10 | |
11 | #include <crypto/algapi.h> |
12 | #include <crypto/aes.h> |
13 | #include <crypto/internal/skcipher.h> |
14 | #include <crypto/padlock.h> |
15 | #include <linux/module.h> |
16 | #include <linux/init.h> |
17 | #include <linux/types.h> |
18 | #include <linux/errno.h> |
19 | #include <linux/interrupt.h> |
20 | #include <linux/kernel.h> |
21 | #include <linux/mm.h> |
22 | #include <linux/percpu.h> |
23 | #include <linux/smp.h> |
24 | #include <linux/slab.h> |
25 | #include <asm/cpu_device_id.h> |
26 | #include <asm/byteorder.h> |
27 | #include <asm/processor.h> |
28 | #include <asm/fpu/api.h> |
29 | |
30 | /* |
31 | * Number of data blocks actually fetched for each xcrypt insn. |
32 | * Processors with prefetch errata will fetch extra blocks. |
33 | */ |
34 | static unsigned int ecb_fetch_blocks = 2; |
35 | #define MAX_ECB_FETCH_BLOCKS (8) |
36 | #define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) |
37 | |
38 | static unsigned int cbc_fetch_blocks = 1; |
39 | #define MAX_CBC_FETCH_BLOCKS (4) |
40 | #define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) |
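/*
 * Worked example (illustrative): with the defaults above, ecb_fetch_bytes
 * is 2 * 16 = 32 and cbc_fetch_bytes is 1 * 16 = 16; with the Nano erratum
 * workaround enabled below they grow to 8 * 16 = 128 and 4 * 16 = 64 bytes.
 */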
41 | |
42 | /* Control word. */ |
43 | struct cword { |
44 | unsigned int __attribute__ ((__packed__)) |
45 | rounds:4, |
46 | algo:3, |
47 | keygen:1, |
48 | interm:1, |
49 | encdec:1, |
50 | ksize:2; |
51 | } __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); |
52 | |
/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and that
 * the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter, but the HW reads
 * more).
 */
60 | struct aes_ctx { |
61 | u32 E[AES_MAX_KEYLENGTH_U32] |
62 | __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); |
63 | u32 d_data[AES_MAX_KEYLENGTH_U32] |
64 | __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); |
65 | struct { |
66 | struct cword encrypt; |
67 | struct cword decrypt; |
68 | } cword; |
69 | u32 *D; |
70 | }; |
71 | |
72 | static DEFINE_PER_CPU(struct cword *, paes_last_cword); |
73 | |
/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
76 | static inline int |
77 | aes_hw_extkey_available(uint8_t key_len) |
78 | { |
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in future CPU revisions. */
82 | if (key_len == 16) |
83 | return 1; |
84 | return 0; |
85 | } |
86 | |
87 | static inline struct aes_ctx *aes_ctx_common(void *ctx) |
88 | { |
89 | unsigned long addr = (unsigned long)ctx; |
90 | unsigned long align = PADLOCK_ALIGNMENT; |
91 | |
92 | if (align <= crypto_tfm_ctx_alignment()) |
93 | align = 1; |
94 | return (struct aes_ctx *)ALIGN(addr, align); |
95 | } |
96 | |
97 | static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) |
98 | { |
	return aes_ctx_common(crypto_tfm_ctx(tfm));
100 | } |
101 | |
102 | static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm) |
103 | { |
	return aes_ctx_common(crypto_skcipher_ctx(tfm));
105 | } |
106 | |
107 | static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, |
108 | unsigned int key_len) |
109 | { |
110 | struct aes_ctx *ctx = aes_ctx(tfm); |
111 | const __le32 *key = (const __le32 *)in_key; |
112 | struct crypto_aes_ctx gen_aes; |
113 | int cpu; |
114 | |
115 | if (key_len % 8) |
116 | return -EINVAL; |
117 | |
118 | /* |
119 | * If the hardware is capable of generating the extended key |
120 | * itself we must supply the plain key for both encryption |
121 | * and decryption. |
122 | */ |
123 | ctx->D = ctx->E; |
124 | |
125 | ctx->E[0] = le32_to_cpu(key[0]); |
126 | ctx->E[1] = le32_to_cpu(key[1]); |
127 | ctx->E[2] = le32_to_cpu(key[2]); |
128 | ctx->E[3] = le32_to_cpu(key[3]); |
129 | |
130 | /* Prepare control words. */ |
131 | memset(&ctx->cword, 0, sizeof(ctx->cword)); |
132 | |
133 | ctx->cword.decrypt.encdec = 1; |
134 | ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; |
135 | ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; |
136 | ctx->cword.encrypt.ksize = (key_len - 16) / 8; |
137 | ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; |
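	/*
	 * Worked values (illustrative): key_len 16 -> rounds 10, ksize 0;
	 * key_len 24 -> rounds 12, ksize 1; key_len 32 -> rounds 14, ksize 2.
	 */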
138 | |
139 | /* Don't generate extended keys if the hardware can do it. */ |
140 | if (aes_hw_extkey_available(key_len)) |
141 | goto ok; |
142 | |
143 | ctx->D = ctx->d_data; |
144 | ctx->cword.encrypt.keygen = 1; |
145 | ctx->cword.decrypt.keygen = 1; |
146 | |
	if (aes_expandkey(&gen_aes, in_key, key_len))
148 | return -EINVAL; |
149 | |
150 | memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); |
151 | memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); |
152 | |
153 | ok: |
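	/*
	 * If either of this context's control words is the one most
	 * recently used on some CPU, drop that CPU's cached pointer so
	 * the next xcrypt there reloads the (new) key material.
	 */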
154 | for_each_online_cpu(cpu) |
155 | if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) || |
156 | &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu)) |
157 | per_cpu(paes_last_cword, cpu) = NULL; |
158 | |
159 | return 0; |
160 | } |
161 | |
162 | static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, |
163 | unsigned int key_len) |
164 | { |
	return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
166 | } |
167 | |
168 | /* ====== Encryption/decryption routines ====== */ |
169 | |
/* These are the real calls to PadLock. */
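/*
 * Per VIA's documentation, the engine only reloads its key schedule on
 * the next xcrypt after EFLAGS has been written.  So when the control
 * word differs from the one last used on this CPU, a pushf/popf pair
 * is enough to force that reload.
 */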
171 | static inline void padlock_reset_key(struct cword *cword) |
172 | { |
173 | int cpu = raw_smp_processor_id(); |
174 | |
175 | if (cword != per_cpu(paes_last_cword, cpu)) |
176 | #ifndef CONFIG_X86_64 |
177 | asm volatile ("pushfl; popfl" ); |
178 | #else |
179 | asm volatile ("pushfq; popfq" ); |
180 | #endif |
181 | } |
182 | |
183 | static inline void padlock_store_cword(struct cword *cword) |
184 | { |
185 | per_cpu(paes_last_cword, raw_smp_processor_id()) = cword; |
186 | } |
187 | |
188 | /* |
189 | * While the padlock instructions don't use FP/SSE registers, they |
190 | * generate a spurious DNA fault when CR0.TS is '1'. Fortunately, |
191 | * the kernel doesn't use CR0.TS. |
192 | */ |
193 | |
194 | static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, |
195 | struct cword *control_word, int count) |
196 | { |
197 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
198 | : "+S" (input), "+D" (output) |
199 | : "d" (control_word), "b" (key), "c" (count)); |
200 | } |
201 | |
202 | static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, |
203 | u8 *iv, struct cword *control_word, int count) |
204 | { |
205 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ |
206 | : "+S" (input), "+D" (output), "+a" (iv) |
207 | : "d" (control_word), "b" (key), "c" (count)); |
208 | return iv; |
209 | } |
210 | |
211 | static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, |
212 | struct cword *cword, int count) |
213 | { |
214 | /* |
215 | * Padlock prefetches extra data so we must provide mapped input buffers. |
216 | * Assume there are at least 16 bytes of stack already in use. |
217 | */ |
218 | u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; |
219 | u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); |
220 | |
221 | memcpy(tmp, in, count * AES_BLOCK_SIZE); |
	rep_xcrypt_ecb(tmp, out, key, cword, count);
223 | } |
224 | |
225 | static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, |
226 | u8 *iv, struct cword *cword, int count) |
227 | { |
228 | /* |
229 | * Padlock prefetches extra data so we must provide mapped input buffers. |
230 | * Assume there are at least 16 bytes of stack already in use. |
231 | */ |
232 | u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; |
233 | u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); |
234 | |
235 | memcpy(tmp, in, count * AES_BLOCK_SIZE); |
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
237 | } |
238 | |
239 | static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, |
240 | struct cword *cword, int count) |
241 | { |
242 | /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. |
243 | * We could avoid some copying here but it's probably not worth it. |
244 | */ |
245 | if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { |
246 | ecb_crypt_copy(in, out, key, cword, count); |
247 | return; |
248 | } |
249 | |
	rep_xcrypt_ecb(in, out, key, cword, count);
251 | } |
252 | |
253 | static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, |
254 | u8 *iv, struct cword *cword, int count) |
255 | { |
256 | /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ |
257 | if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) |
258 | return cbc_crypt_copy(in, out, key, iv, cword, count); |
259 | |
	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
261 | } |
262 | |
263 | static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, |
264 | void *control_word, u32 count) |
265 | { |
266 | u32 initial = count & (ecb_fetch_blocks - 1); |
267 | |
268 | if (count < ecb_fetch_blocks) { |
		ecb_crypt(input, output, key, control_word, count);
270 | return; |
271 | } |
272 | |
273 | count -= initial; |
274 | |
275 | if (initial) |
276 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
277 | : "+S" (input), "+D" (output) |
278 | : "d" (control_word), "b" (key), "c" (initial)); |
279 | |
280 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
281 | : "+S" (input), "+D" (output) |
282 | : "d" (control_word), "b" (key), "c" (count)); |
283 | } |
284 | |
285 | static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, |
286 | u8 *iv, void *control_word, u32 count) |
287 | { |
288 | u32 initial = count & (cbc_fetch_blocks - 1); |
289 | |
290 | if (count < cbc_fetch_blocks) |
		return cbc_crypt(input, output, key, iv, control_word, count);
292 | |
293 | count -= initial; |
294 | |
295 | if (initial) |
296 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ |
297 | : "+S" (input), "+D" (output), "+a" (iv) |
298 | : "d" (control_word), "b" (key), "c" (initial)); |
299 | |
300 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ |
301 | : "+S" (input), "+D" (output), "+a" (iv) |
302 | : "d" (control_word), "b" (key), "c" (count)); |
303 | return iv; |
304 | } |
305 | |
306 | static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
307 | { |
308 | struct aes_ctx *ctx = aes_ctx(tfm); |
309 | |
	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
313 | } |
314 | |
315 | static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
316 | { |
317 | struct aes_ctx *ctx = aes_ctx(tfm); |
318 | |
	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
322 | } |
323 | |
324 | static struct crypto_alg aes_alg = { |
325 | .cra_name = "aes" , |
326 | .cra_driver_name = "aes-padlock" , |
327 | .cra_priority = PADLOCK_CRA_PRIORITY, |
328 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
329 | .cra_blocksize = AES_BLOCK_SIZE, |
330 | .cra_ctxsize = sizeof(struct aes_ctx), |
331 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, |
332 | .cra_module = THIS_MODULE, |
333 | .cra_u = { |
334 | .cipher = { |
335 | .cia_min_keysize = AES_MIN_KEY_SIZE, |
336 | .cia_max_keysize = AES_MAX_KEY_SIZE, |
337 | .cia_setkey = aes_set_key, |
338 | .cia_encrypt = padlock_aes_encrypt, |
339 | .cia_decrypt = padlock_aes_decrypt, |
340 | } |
341 | } |
342 | }; |
343 | |
344 | static int ecb_aes_encrypt(struct skcipher_request *req) |
345 | { |
346 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
347 | struct aes_ctx *ctx = skcipher_aes_ctx(tfm); |
348 | struct skcipher_walk walk; |
349 | unsigned int nbytes; |
350 | int err; |
351 | |
	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);
365 | |
366 | return err; |
367 | } |
368 | |
369 | static int ecb_aes_decrypt(struct skcipher_request *req) |
370 | { |
371 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
372 | struct aes_ctx *ctx = skcipher_aes_ctx(tfm); |
373 | struct skcipher_walk walk; |
374 | unsigned int nbytes; |
375 | int err; |
376 | |
	padlock_reset_key(&ctx->cword.decrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);
390 | |
391 | return err; |
392 | } |
393 | |
394 | static struct skcipher_alg ecb_aes_alg = { |
395 | .base.cra_name = "ecb(aes)" , |
396 | .base.cra_driver_name = "ecb-aes-padlock" , |
397 | .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, |
398 | .base.cra_blocksize = AES_BLOCK_SIZE, |
399 | .base.cra_ctxsize = sizeof(struct aes_ctx), |
400 | .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, |
401 | .base.cra_module = THIS_MODULE, |
402 | .min_keysize = AES_MIN_KEY_SIZE, |
403 | .max_keysize = AES_MAX_KEY_SIZE, |
404 | .setkey = aes_set_key_skcipher, |
405 | .encrypt = ecb_aes_encrypt, |
406 | .decrypt = ecb_aes_decrypt, |
407 | }; |
408 | |
409 | static int cbc_aes_encrypt(struct skcipher_request *req) |
410 | { |
411 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
412 | struct aes_ctx *ctx = skcipher_aes_ctx(tfm); |
413 | struct skcipher_walk walk; |
414 | unsigned int nbytes; |
415 | int err; |
416 | |
	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.decrypt);
432 | |
433 | return err; |
434 | } |
435 | |
436 | static int cbc_aes_decrypt(struct skcipher_request *req) |
437 | { |
438 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
439 | struct aes_ctx *ctx = skcipher_aes_ctx(tfm); |
440 | struct skcipher_walk walk; |
441 | unsigned int nbytes; |
442 | int err; |
443 | |
	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);
457 | |
458 | return err; |
459 | } |
460 | |
461 | static struct skcipher_alg cbc_aes_alg = { |
462 | .base.cra_name = "cbc(aes)" , |
463 | .base.cra_driver_name = "cbc-aes-padlock" , |
464 | .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, |
465 | .base.cra_blocksize = AES_BLOCK_SIZE, |
466 | .base.cra_ctxsize = sizeof(struct aes_ctx), |
467 | .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, |
468 | .base.cra_module = THIS_MODULE, |
469 | .min_keysize = AES_MIN_KEY_SIZE, |
470 | .max_keysize = AES_MAX_KEY_SIZE, |
471 | .ivsize = AES_BLOCK_SIZE, |
472 | .setkey = aes_set_key_skcipher, |
473 | .encrypt = cbc_aes_encrypt, |
474 | .decrypt = cbc_aes_decrypt, |
475 | }; |
476 | |
477 | static const struct x86_cpu_id padlock_cpu_id[] = { |
478 | X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL), |
479 | {} |
480 | }; |
481 | MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id); |
482 | |
483 | static int __init padlock_init(void) |
484 | { |
485 | int ret; |
486 | struct cpuinfo_x86 *c = &cpu_data(0); |
487 | |
	if (!x86_match_cpu(padlock_cpu_id))
489 | return -ENODEV; |
490 | |
491 | if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { |
492 | printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n" ); |
493 | return -ENODEV; |
494 | } |
495 | |
	if ((ret = crypto_register_alg(&aes_alg)) != 0)
		goto aes_err;

	if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
		goto ecb_aes_err;

	if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
		goto cbc_aes_err;
504 | |
505 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n" ); |
506 | |
507 | if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) { |
508 | ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; |
509 | cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; |
510 | printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n" ); |
511 | } |
512 | |
513 | out: |
514 | return ret; |
515 | |
cbc_aes_err:
	crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
523 | } |
524 | |
525 | static void __exit padlock_fini(void) |
526 | { |
	crypto_unregister_skcipher(&cbc_aes_alg);
	crypto_unregister_skcipher(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
530 | } |
531 | |
532 | module_init(padlock_init); |
533 | module_exit(padlock_fini); |
534 | |
535 | MODULE_DESCRIPTION("VIA PadLock AES algorithm support" ); |
536 | MODULE_LICENSE("GPL" ); |
537 | MODULE_AUTHOR("Michal Ludvig" ); |
538 | |
539 | MODULE_ALIAS_CRYPTO("aes" ); |
540 | |