// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This file implements the AMCC crypto offload Linux device driver for use
 * with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_trng.h"

#define PPC4XX_SEC_VERSION_STR "0.5"

/*
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_control ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;
	u32 device_ctrl;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* setup pe dma, include reset sg, pdr and pe, then release reset */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* un-reset pe, sg and pdr */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	device_ctrl |= PPC4XX_DC_3DES_EN;
	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* un-reset pe, sg and pdr; this time enable the packet engine */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	if (dev->is_revb) {
		writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
		       dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
		writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
		       dev->ce_base + CRYPTO4XX_INT_EN);
	} else {
		writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
	}
}

int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		kfree(ctx->sa_in);
		ctx->sa_in = NULL;
		return -ENOMEM;
	}

	ctx->sa_len = size;

	return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	kfree(ctx->sa_in);
	ctx->sa_in = NULL;
	kfree(ctx->sa_out);
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}

/*
 * alloc memory for the packet descriptor ring
 * no need to alloc extra buffers for the ring
 * pdr_head, pdr_tail and the shadow SA/state-record pools are
 * initialized by this function
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;

	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_KERNEL);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
		return -ENOMEM;
	}
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				   &dev->shadow_sa_pool_pa,
				   GFP_KERNEL);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
			 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			 &dev->shadow_sr_pool_pa, GFP_KERNEL);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;
	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		struct ce_pd *pd = &dev->pdr[i];
		struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];

		pd->sa = dev->shadow_sa_pool_pa +
			 sizeof(union shadow_sa_buf) * i;

		/* alloc 256 bytes which is enough for any kind of dynamic sa */
		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;

		/* alloc state record */
		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
				  sizeof(struct sa_state_record) * i;
	}

	return 0;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);

	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);

	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
				  dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}

static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}

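/*
 * Illustrative note (not driver code): the PDR is a classic ring buffer
 * that keeps one slot unused so "full" and "empty" stay distinguishable.
 * E.g. with PPC4XX_NUM_PD = 256, pdr_head = 41 and pdr_tail = 42:
 * (41 + 1) % 256 == 42 == pdr_tail, so ERING_WAS_FULL is returned even
 * though slot 41 itself is still free.
 */
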
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
	u32 tail;
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	pd_uinfo->state = PD_ENTRY_FREE;

	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	tail = dev->pdr_tail;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return tail;
}

/*
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail, gdr_head and gdr_count are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				      &dev->gdr_pa, GFP_KERNEL);
	if (!dev->gdr)
		return -ENOMEM;

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	if (dev->gdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				  dev->gdr, dev->gdr_pa);
}

/*
 * When this function is called, preemption or interrupts must be disabled.
 */
static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}

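/*
 * Illustrative example of the wraparound check above (not driver code),
 * assuming, for illustration, PPC4XX_NUM_GD = 256: with gdr_head = 250,
 * gdr_tail = 10 and n = 10, tmp = (250 + 10) % 256 = 4. Since
 * head > tail and tmp does not land in [gdr_tail, gdr_head), the
 * allocation wraps legally and gdr_head becomes 4. Had tmp landed in
 * [gdr_tail, gdr_head), the request would have overrun unconsumed
 * descriptors, so ERING_WAS_FULL would be returned instead.
 */
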
static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return &dev->gdr[idx];
}

/*
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail, sdr_head and sdr_count are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;

	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
				   PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
				   &dev->scatter_buffer_pa, GFP_KERNEL);
	if (!dev->scatter_buffer_va)
		return -ENOMEM;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_KERNEL);
	if (!dev->sdr)
		return -ENOMEM;

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		dev->sdr[i].ptr = dev->scatter_buffer_pa +
				  PPC4XX_SD_BUFFER_SIZE * i;
	}

	return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va)
		dma_free_coherent(dev->core_dev->device,
				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}

/*
 * When this function is called, preemption or interrupts must be disabled.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case is already taken care of */
	dev->sdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}
	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return &dev->sdr[idx];
}

static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	unsigned int first_sd = pd_uinfo->first_sd;
	unsigned int last_sd;
	unsigned int overflow = 0;
	unsigned int to_copy;
	unsigned int dst_start = 0;

	/*
	 * Because the scatter buffers are all neatly organized in one
	 * big contiguous ringbuffer, scatterwalk_map_and_copy() can
	 * be instructed to copy a range of buffers in one go.
	 */

	last_sd = (first_sd + pd_uinfo->num_sd);
	if (last_sd > PPC4XX_LAST_SD) {
		last_sd = PPC4XX_LAST_SD;
		overflow = last_sd % PPC4XX_NUM_SD;
	}

	while (nbytes) {
		void *buf = dev->scatter_buffer_va +
			first_sd * PPC4XX_SD_BUFFER_SIZE;

		to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
				      (1 + last_sd - first_sd));
		scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
		nbytes -= to_copy;

		if (overflow) {
			first_sd = 0;
			last_sd = overflow;
			dst_start += to_copy;
			overflow = 0;
		}
	}
}

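/*
 * Illustrative example (not driver code), assuming the header's usual
 * PPC4XX_NUM_SD = 256 and PPC4XX_SD_BUFFER_SIZE = 2048: a result that
 * starts at first_sd = 254 and spans num_sd = 4 buffers wraps the ring.
 * The first pass above copies buffers 254..255 (up to 2 * 2048 bytes)
 * into dst, then first_sd is reset to 0 and the remaining bytes are
 * copied from the start of the scatter-buffer area.
 */
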
static void crypto4xx_copy_digest_to_dst(void *dst,
					 struct pd_uinfo *pd_uinfo,
					 struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy(dst, pd_uinfo->sr_va->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;

	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}

static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo,
				  struct ce_pd *pd)
{
	struct skcipher_request *req;
	struct scatterlist *dst;

	req = skcipher_request_cast(pd_uinfo->async_req);

	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  req->cryptlen, req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
			       DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);

		crypto4xx_memcpy_from_le32((u32 *)req->iv,
			pd_uinfo->sr_va->save_iv,
			crypto_skcipher_ivsize(skcipher));
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		skcipher_request_complete(req, -EINPROGRESS);
	skcipher_request_complete(req, 0);
}

static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
				 struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));

	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		ahash_request_complete(ahash_req, -EINPROGRESS);
	ahash_request_complete(ahash_req, 0);
}

static void crypto4xx_aead_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo,
				struct ce_pd *pd)
{
	struct aead_request *aead_req = container_of(pd_uinfo->async_req,
		struct aead_request, base);
	struct scatterlist *dst = pd_uinfo->dest_va;
	size_t cp_len = crypto_aead_authsize(
		crypto_aead_reqtfm(aead_req));
	u32 icv[AES_BLOCK_SIZE];
	int err = 0;

	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  pd->pd_ctl_len.bf.pkt_len,
					  dst);
	} else {
		dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
			       DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
		/* append icv at the end */
		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
					   sizeof(icv));

		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
					 cp_len, 1);
	} else {
		/* check icv at the end */
		scatterwalk_map_and_copy(icv, aead_req->src,
			aead_req->assoclen + aead_req->cryptlen -
			cp_len, cp_len, 0);

		crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));

		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
			err = -EBADMSG;
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd->pd_ctl.bf.status & 0xff) {
		if (!__ratelimit(&dev->aead_ratelimit)) {
			if (pd->pd_ctl.bf.status & 2)
				pr_err("pad fail error\n");
			if (pd->pd_ctl.bf.status & 4)
				pr_err("seqnum fail\n");
			if (pd->pd_ctl.bf.status & 8)
				pr_err("error _notify\n");
			pr_err("aead return err status = 0x%02x\n",
				pd->pd_ctl.bf.status & 0xff);
			pr_err("pd pad_ctl = 0x%08x\n",
				pd->pd_ctl.bf.pd_pad_ctl);
		}
		err = -EINVAL;
	}

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		aead_request_complete(aead_req, -EINPROGRESS);

	aead_request_complete(aead_req, err);
}

static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd = &dev->pdr[idx];
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];

	switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		crypto4xx_cipher_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto4xx_aead_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		crypto4xx_ahash_done(dev, pd_uinfo);
		break;
	}
}

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}

static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}

int crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       const unsigned int datalen,
		       const __le32 *iv, const u32 iv_len,
		       const struct dynamic_sa_ctl *req_sa,
		       const unsigned int sa_len,
		       const unsigned int assoclen,
		       struct scatterlist *_dst)
{
	struct crypto4xx_device *dev = ctx->dev;
	struct dynamic_sa_ctl *sa;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo;
	unsigned int nbytes = datalen;
	size_t offset_to_sr_ptr;
	u32 gd_idx = 0;
	int tmp;
	bool is_busy, force_sd;

	/*
	 * There's a very subtle/disguised "bug" in the hardware that
	 * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
	 * of the hardware spec:
	 * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
	 * operation modes for >>> "Block ciphers" <<<.
	 *
	 * To work around this issue and stop the hardware from causing
	 * "overran dst buffer" on crypttexts that are not a multiple
	 * of 16 (AES_BLOCK_SIZE), we force the driver to use the
	 * scatter buffers.
	 */
	force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
		|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
		&& (datalen % AES_BLOCK_SIZE);
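	/*
	 * Illustrative example (not driver code): a CFB request with
	 * datalen = 20 gives 20 % AES_BLOCK_SIZE = 4, so force_sd is
	 * true and the result is routed through the scatter buffers;
	 * a 32-byte CFB request, or any other-mode request, leaves
	 * force_sd false.
	 */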

	/* figure how many gd are needed */
	tmp = sg_nents_for_len(src, assoclen + datalen);
	if (tmp < 0) {
		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
		return tmp;
	}
	if (tmp == 1)
		tmp = 0;
	num_gd = tmp;

	if (assoclen) {
		nbytes += assoclen;
		dst = scatterwalk_ffwd(_dst, dst, assoclen);
	}

	/* figure how many sd are needed */
	if (sg_is_last(dst) && !force_sd) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section of code needs to be protected:
	 * the gather ring and scatter ring allocations need to be
	 * consecutive. If we run out of any kind of descriptor, the
	 * descriptors already obtained must be returned to their
	 * original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	/*
	 * Let the caller know to slow down, once more than 13/16ths = 81%
	 * of the available data contexts are being used simultaneously.
	 *
	 * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
	 * 31 more contexts before new requests have to be rejected.
	 */
	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 13) / 16);
	} else {
		/*
		 * To fix contention issues between ipsec (no backlog) and
		 * dm-crypt (backlog), reserve 32 entries for "no backlog"
		 * data contexts.
		 */
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 15) / 16);

		if (is_busy) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EBUSY;
		}
	}
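	/*
	 * Illustrative arithmetic (not driver code), using the
	 * PPC4XX_NUM_PD = 256 figure from the comment above: requests
	 * that may be backlogged are merely flagged busy once
	 * 256 * 13 / 16 = 208 contexts are in flight, whereas requests
	 * without CRYPTO_TFM_REQ_MAY_BACKLOG are rejected with -EBUSY
	 * once 256 * 15 / 16 = 240 contexts are in flight.
	 */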

	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd = &dev->pdr[pd_entry];
	pd->sa_len = sa_len;

	pd_uinfo = &dev->pdr_uinfo[pd_entry];
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;
	pd_uinfo->dest_va = dst;
	pd_uinfo->async_req = req;

	if (iv_len)
		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);

	sa = pd_uinfo->sa_va;
	memcpy(sa, req_sa, sa_len * 4);

	sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;

	if (num_gd) {
		dma_addr_t gd_dma;
		struct scatterlist *sg;

		/* get first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		/* walk the sg, and setup gather array */

		sg = src;
		while (nbytes) {
			size_t len;

			len = min(sg->length, nbytes);
			gd->ptr = dma_map_page(dev->core_dev->device,
				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
			gd->ctl_len.len = len;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (len >= nbytes)
				break;

			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			sg = sg_next(sg);
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
				src->offset, min(nbytes, src->length),
				DMA_TO_DEVICE);
		/*
		 * Disable gather in sa command
		 */
		sa->sa_command_0.bf.gather = 0;
		/*
		 * Indicate gather array is not used
		 */
		pd_uinfo->first_gd = 0xffffffff;
	}
	if (!num_sd) {
		/*
		 * The application gave us dst as one whole piece of memory,
		 * so there is no need to use the scatter ring.
		 */
		pd_uinfo->first_sd = 0xffffffff;
		sa->sa_command_0.bf.scatter = 0;
		pd->dest = (u32)dma_map_page(dev->core_dev->device,
				sg_page(dst), dst->offset,
				min(datalen, dst->length),
				DMA_TO_DEVICE);
	} else {
		dma_addr_t sd_dma;
		struct ce_sd *sd = NULL;

		u32 sd_idx = fst_sd;
		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->first_sd = fst_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr should be setup by sd_init routine */
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			} else {
				/*
				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so done.
				 */
				nbytes = 0;
			}
		}
	}

	pd->pd_ctl.w = PD_CTL_HOST_READY |
		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
			PD_CTL_HASH_FINAL : 0);
	pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);

	wmb();
	/* write any value to push engine to read a pd */
	writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return is_busy ? -EBUSY : -EINPROGRESS;
}
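/*
 * Illustrative caller-side sketch (not part of this driver): because
 * crypto4xx_build_pd() queues asynchronously, a hypothetical caller
 * treats -EINPROGRESS as "accepted" and -EBUSY as "accepted, but slow
 * down" when the request allows backlogging:
 *
 *	ret = crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
 *				 req->cryptlen, iv, ivlen, sa, sa_len,
 *				 0, NULL);
 *	// -EINPROGRESS: in flight; completion arrives via the tasklet.
 *	// -EBUSY: also in flight (MAY_BACKLOG), but the ring is filling.
 *	// -EAGAIN: no descriptors were available; retry later.
 */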

/*
 * Algorithm Registration Functions
 */
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
			       struct crypto4xx_ctx *ctx)
{
	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}

static int crypto4xx_sk_init(struct crypto_skcipher *sk)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(sk);
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher.cipher =
			crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->sw_cipher.cipher))
			return PTR_ERR(ctx->sw_cipher.cipher);
	}

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
	crypto4xx_ctx_init(amcc_alg, ctx);
	return 0;
}

static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
	crypto4xx_free_sa(ctx);
}

static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
{
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	crypto4xx_common_exit(ctx);
	if (ctx->sw_cipher.cipher)
		crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
}

static int crypto4xx_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto4xx_alg *amcc_alg;

	ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
						CRYPTO_ALG_NEED_FALLBACK |
						CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->sw_cipher.aead))
		return PTR_ERR(ctx->sw_cipher.aead);

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
	crypto4xx_ctx_init(amcc_alg, ctx);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
				crypto_aead_reqsize(ctx->sw_cipher.aead),
				sizeof(struct crypto4xx_aead_reqctx)));
	return 0;
}

static void crypto4xx_aead_exit(struct crypto_aead *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);

	crypto4xx_common_exit(ctx);
	crypto_free_aead(ctx->sw_cipher.aead);
}

static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
				  struct crypto4xx_alg_common *crypto_alg,
				  int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AEAD:
			rc = crypto_register_aead(&alg->alg.u.aead);
			break;

		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;

		case CRYPTO_ALG_TYPE_RNG:
			rc = crypto_register_rng(&alg->alg.u.rng);
			break;

		default:
			rc = crypto_register_skcipher(&alg->alg.u.cipher);
			break;
		}

		if (rc)
			kfree(alg);
		else
			list_add_tail(&alg->entry, &sec_dev->alg_list);
	}

	return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);
		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;

		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&alg->alg.u.aead);
			break;

		case CRYPTO_ALG_TYPE_RNG:
			crypto_unregister_rng(&alg->alg.u.rng);
			break;

		default:
			crypto_unregister_skcipher(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}

static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail = core_dev->dev->pdr_tail;
	u32 head = core_dev->dev->pdr_head;

	do {
		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
		pd = &core_dev->dev->pdr[tail];
		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
		     ((READ_ONCE(pd->pd_ctl.w) &
		       (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
		       PD_CTL_PE_DONE)) {
			crypto4xx_pd_done(core_dev->dev, tail);
			tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
		} else {
			/* if tail not done, break */
			break;
		}
	} while (head != tail);
}

/*
 * Top half of the ISR.
 */
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
						      u32 clr_val)
{
	struct device *dev = data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
}

static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
					   PPC4XX_TMO_ERR_INT);
}

static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
				 u8 *data, unsigned int max)
{
	unsigned int i, curr = 0;
	u32 val[2];

	do {
		/* trigger PRN generation */
		writel(PPC4XX_PRNG_CTRL_AUTO_EN,
		       dev->ce_base + CRYPTO4XX_PRNG_CTRL);

		for (i = 0; i < 1024; i++) {
			/* usually 19 iterations are enough */
			if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
			     CRYPTO4XX_PRNG_STAT_BUSY))
				continue;

			val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
			val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
			break;
		}
		if (i == 1024)
			return -ETIMEDOUT;

		if ((max - curr) >= 8) {
			memcpy(data, &val, 8);
			data += 8;
			curr += 8;
		} else {
			/* copy only remaining bytes */
			memcpy(data, &val, max - curr);
			break;
		}
	} while (curr < max);

	return curr;
}
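/*
 * Illustrative example (not driver code): for max = 16 the loop above
 * runs twice, each pass triggering PRN generation, polling the BUSY
 * flag, and copying the full 8-byte val[] pair, so curr advances
 * 0 -> 8 -> 16 and the function returns 16. A non-multiple-of-8 tail
 * is handled by the "copy only remaining bytes" branch.
 */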

static int crypto4xx_prng_generate(struct crypto_rng *tfm,
				   const u8 *src, unsigned int slen,
				   u8 *dstn, unsigned int dlen)
{
	struct rng_alg *alg = crypto_rng_alg(tfm);
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_device *dev;
	int ret;

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
	dev = amcc_alg->dev;

	mutex_lock(&dev->core_dev->rng_lock);
	ret = ppc4xx_prng_data_read(dev, dstn, dlen);
	mutex_unlock(&dev->core_dev->rng_lock);
	return ret;
}

static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
			       unsigned int slen)
{
	return 0;
}

/*
 * Supported Crypto Algorithms
 */
static struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cbc,
		.encrypt = crypto4xx_encrypt_iv_block,
		.decrypt = crypto4xx_decrypt_iv_block,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ctr,
		.encrypt = crypto4xx_encrypt_ctr,
		.decrypt = crypto4xx_decrypt_ctr,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.setkey = crypto4xx_setkey_rfc3686,
		.encrypt = crypto4xx_rfc3686_encrypt,
		.decrypt = crypto4xx_rfc3686_decrypt,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = crypto4xx_setkey_aes_ecb,
		.encrypt = crypto4xx_encrypt_noiv_block,
		.decrypt = crypto4xx_decrypt_noiv_block,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },

	/* AEAD */
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_ccm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_ccm,
		.decrypt = crypto4xx_decrypt_aes_ccm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = 16,
		.base = {
			.cra_name = "ccm(aes)",
			.cra_driver_name = "ccm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_gcm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_gcm,
		.decrypt = crypto4xx_decrypt_aes_gcm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = 16,
		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "gcm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
		.base = {
			.cra_name = "stdrng",
			.cra_driver_name = "crypto4xx_rng",
			.cra_priority = 300,
			.cra_ctxsize = 0,
			.cra_module = THIS_MODULE,
		},
		.generate = crypto4xx_prng_generate,
		.seed = crypto4xx_prng_seed,
		.seedsize = 0,
	} },
};

/*
 * Module Initialization Routine
 */
static int crypto4xx_probe(struct platform_device *ofdev)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;
	struct device_node *np;
	u32 pvr;
	bool is_revb = true;

	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
	if (rc)
		return -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
	if (np) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else {
		np = of_find_compatible_node(NULL, NULL, "amcc,ppc405ex-crypto");
		if (np) {
			mtdcri(SDR0, PPC405EX_SDR0_SRST,
			       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
			mtdcri(SDR0, PPC405EX_SDR0_SRST,
			       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
			is_revb = false;
		} else {
			np = of_find_compatible_node(NULL, NULL, "amcc,ppc460sx-crypto");
			if (np) {
				mtdcri(SDR0, PPC460SX_SDR0_SRST,
				       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
				mtdcri(SDR0, PPC460SX_SDR0_SRST,
				       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
			} else {
				printk(KERN_ERR "Crypto Function Not supported!\n");
				return -EINVAL;
			}
		}
	}

	of_node_put(np);

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	rc = -ENOMEM;
	if (!core_dev->dev)
		goto err_alloc_dev;

	/*
	 * Older versions of the 460EX/GT have a hardware bug.
	 * Hence they do not support H/W based security intr coalescing.
	 */
	pvr = mfspr(SPRN_PVR);
	if (is_revb && ((pvr >> 4) == 0x130218A)) {
		u32 min = PVR_MIN(pvr);

		if (min < 4) {
			dev_info(dev, "RevA detected - disable interrupt coalescing\n");
			is_revb = false;
		}
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->dev->is_revb = is_revb;
	core_dev->device = dev;
	mutex_init(&core_dev->rng_lock);
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	rc = request_irq(core_dev->irq, is_revb ?
			 crypto4xx_ce_interrupt_handler_revb :
			 crypto4xx_ce_interrupt_handler, 0,
			 KBUILD_MODNAME, dev);
	if (rc)
		goto err_request_irq;

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
				    ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	ppc4xx_trng_probe(core_dev);
	return 0;

err_start_dev:
	free_irq(core_dev->irq, dev);
err_request_irq:
	irq_dispose_mapping(core_dev->irq);
	iounmap(core_dev->dev->ce_base);
err_iomap:
	tasklet_kill(&core_dev->tasklet);
err_build_sdr:
	crypto4xx_destroy_sdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_pdr(core_dev->dev);
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}

static void crypto4xx_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	ppc4xx_trng_remove(core_dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	mutex_destroy(&core_dev->rng_lock);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);
}

static const struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto", },
	{ },
};
MODULE_DEVICE_TABLE(of, crypto4xx_match);

static struct platform_driver crypto4xx_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = crypto4xx_match,
	},
	.probe = crypto4xx_probe,
	.remove_new = crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");