// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64
/*
 * Null hashes to align with hardware behavior on i.MX6SL and i.MX6ULL;
 * these are byte-reversed for consistency with the hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
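
/*
 * The fields above correspond to one hardware "work packet" the DCP fetches
 * over DMA: next_cmd_addr would allow packets to be chained, although this
 * driver only ever submits a single packet at a time; control0/control1
 * select the operation, and status is written back by the engine on
 * completion.
 */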

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};
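
/*
 * The bounce buffers let the driver accept arbitrary scatterlists: input is
 * staged into the *_in_buf areas before each DMA run and results are copied
 * back out of the *_out_buf areas afterwards. aes_key is twice the key size
 * because the key and the CBC IV are handed to the hardware back to back in
 * a single payload buffer.
 */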

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};
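
/*
 * Only two of the four hardware channels are used: channel 0 for hashing and
 * channel 2 for AES, each drained by its own kthread below. The remaining
 * channels are left idle by this driver.
 */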

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
	/*
	 * fallback_req must stay at the end: the fallback transform's own
	 * request context is laid out directly behind it (see the reqsize
	 * computation in mxs_dcp_aes_fallback_init_tfm()).
	 */
	struct skcipher_request fallback_req;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};
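
/*
 * The export format bundles both the request state and the transform state,
 * so a partially-hashed stream can be exported and later re-imported onto a
 * fresh request (see dcp_sha_export()/dcp_sha_import() below).
 */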

/*
 * There can only be one instance of the MXS DCP due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

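/*
 * Kick a single DMA descriptor on the context's channel: clear stale status,
 * point the channel at the descriptor, bump the semaphore to start the
 * transfer, then wait for the per-channel completion signalled from the IRQ
 * handler.
 */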
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));

	/* Unmap the descriptor before inspecting the result. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	return 0;
}

/*
 * AES128 block cipher (ECB and CBC, both directions).
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/*
	 * Copy the IV for CBC chaining: after encryption the next IV is the
	 * last ciphertext block just produced; after decryption it is the
	 * last ciphertext block consumed, still sitting in the input buffer.
	 */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES 128 is supported by the hardware, so store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

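/*
 * Drain the request's scatterlist into the bounce buffer, submitting a full
 * DCP_BUF_SZ chunk to the hardware whenever the buffer fills up. On the
 * final chunk the digest is copied out byte-reversed, since the hardware
 * emits it in the opposite byte order.
 */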
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware emits the digest byte-reversed, so flip it. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
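
/*
 * Consumers reach these algorithms through the generic crypto API; a minimal
 * hypothetical sketch of hashing a scatterlist with the DCP, error handling
 * and async completion handling elided:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	crypto_ahash_digest(req);	// may return -EINPROGRESS (async)
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 *
 * With cra_priority 400, "sha256-dcp" is preferred over the generic software
 * implementation whenever this driver registered successfully.
 */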

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs. */
	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk))
		return PTR_ERR(sdcp->dcp_clk);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		return ret;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		return ret;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}

static void mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove_new = mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");