// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sm3.h>
#include <crypto/internal/hash.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
#define CC_SM3_HASH_LEN_SIZE 8

struct cc_hash_handle {
	u32 digest_len_sram_addr; /* const value in SRAM */
	u32 larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
};

static const u32 cc_digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 cc_md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
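/*
 * Note: cc_md5_init reuses the SHA1_H0..SHA1_H3 constants because the MD5
 * IV words (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476) are numerically
 * identical to them; as with the other larval digests, the words are listed
 * in reverse order for the hardware.
 */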
static const u32 cc_sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 cc_sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };

/*
 * Due to the way the HW works, every double word in the SHA384 and SHA512
 * larval hashes must be stored in hi/lo order
 */
#define hilo(x) upper_32_bits(x), lower_32_bits(x)
static const u32 cc_sha384_init[] = {
	hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
	hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
static const u32 cc_sha512_init[] = {
	hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
	hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };

static const u32 cc_sm3_init[] = {
	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };

static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static const void *cc_larval_digest(struct device *dev, u32 mode);

struct cc_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
	u8 *key;
};

/* hash per-session context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/* holds the origin digest: the digest after "setkey" for HMAC,
	 * or the initial (larval) digest for plain HASH
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	unsigned int hash_len;
	struct completion setkey_comp;
	bool is_hmac;
};
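
/*
 * Both buffers above are mapped for DMA individually; the
 * ____cacheline_aligned markers keep each one on its own cache line so the
 * CPU and the engine never share a line on non-coherent platforms.
 */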

static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
			unsigned int flow_mode, struct cc_hw_desc desc[],
			bool is_not_last_data, unsigned int *seq_size);

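/*
 * MD5 is defined with little-endian output and the SHA-384/512 state words
 * are kept hi/lo swapped (see hilo() above), which is presumably why those
 * three modes take the byte-swap path below while the others rely on the
 * engine's little-endian result configuration.
 */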
static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
	    mode == DRV_HASH_SHA512) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}

static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
			 unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}

static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       cc_digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len,
				       cc_digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /*hash*/
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}

static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
		      struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	state->digest_buff_dma_addr =
		dma_map_single(dev, state->digest_buff,
			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		return -EINVAL;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, state->digest_bytes_len,
				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
			goto unmap_digest_buf;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto unmap_digest_len;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	}

	return 0;

unmap_digest_len:
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
unmap_digest_buf:
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}

	return -EINVAL;
}

static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}

static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
			    unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr) {
		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}

static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}

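/*
 * HMAC finalization: once the inner hash has consumed the message, the
 * descriptors below (1) write the inner digest into the request state,
 * (2) reload the opad-XOR-key state, (3) load the block length constant
 * from SRAM, (4) issue a no-op descriptor as a write barrier, and (5) hash
 * the inner digest, yielding H(K ^ opad || H(K ^ ipad || msg)).
 */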
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}

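/*
 * One-shot digest: maps the request state, result buffer and source
 * scatterlist, then queues a single descriptor chain that loads the initial
 * digest (or the HMAC ipad state), loads the running length, hashes the
 * data and writes the result out; completion is reported asynchronously
 * via cc_digest_complete().
 */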
static int cc_hash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_digest_addr;
	int idx = 0;
	int rc = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	/* If HMAC then load hash IPAD xor key, if HASH then load initial
	 * digest
	 */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
							   ctx->hash_mode);
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr,
			     ctx->hash_len, NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, ctx->hash_len);
		if (nbytes)
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      ctx->hash_len, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		idx = cc_fin_hmac(desc, req, idx);
	}

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

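/*
 * Reload the intermediate digest and byte count from the request state and
 * stream the already-mapped data into the hash engine; shared by the
 * update, finup and final flows to resume a multi-part hash.
 */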
static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}

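/*
 * Multi-part update: only whole blocks are sent to the engine. The buffer
 * manager returns 1 when the data was merely buffered for a later call, in
 * which case no descriptors are queued at all.
 */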
static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size %x doesn't require HW update\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

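/*
 * Common tail for finup and final: "update" selects whether the remaining
 * source data is hashed first (finup) or only the buffered remainder is
 * flushed (final); either way the restored state is padded and the final
 * digest written out.
 */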
static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}

static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}

static int cc_hash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	return 0;
}

static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx_dma(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);

	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* A keylen of ZERO selects the plain HASH flow; any NON-ZERO value
	 * selects the HMAC flow
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;
	ctx->key_params.key = NULL;

	if (keylen) {
		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
		if (!ctx->key_params.key)
			return -ENOMEM;

		ctx->key_params.key_dma_addr =
			dma_map_single(dev, ctx->key_params.key, keylen,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				ctx->key_params.key, keylen);
			kfree_sensitive(ctx->key_params.key);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			cc_set_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr +
				       digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen)) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
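	/* For each of the two pad constants, the block-sized key copy is
	 * XORed with ipad (0x36..) or opad (0x5c..) and hashed as a single
	 * block: the first pass leaves the ipad state in digest_buff, the
	 * second leaves the opad state in opad_tmp_keys_buff.
	 */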
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
		 * of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

out:
	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}

	kfree_sensitive(ctx->key_params.key);

	return rc;
}

static int cc_xcbc_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = 0;
	unsigned int idx = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key_params.key)
		return -ENOMEM;

	ctx->key_params.key_dma_addr =
		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		kfree_sensitive(ctx->key_params.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

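	/* Derive the three XCBC subkeys as in RFC 3566: K1, K2 and K3 are
	 * the ECB encryptions of the constants 0x01..01, 0x02..02 and
	 * 0x03..03 under the user key, stored at fixed offsets in
	 * opad_tmp_keys_buff.
	 */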
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	kfree_sensitive(ctx->key_params.key);

	return rc;
}

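/*
 * For CMAC the raw key is simply copied into the context; no derivation
 * descriptors are queued here, so the CMAC subkeys are presumably generated
 * by the engine itself when the MAC runs.
 */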
static int cc_cmac_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
	if (keylen == 24) {
		memset(ctx->opad_tmp_keys_buff + 24, 0,
		       CC_AES_KEY_SIZE_MAX - 24);
	}

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	return 0;
}

static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}

static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr =
		dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr =
		dma_map_single(dev, ctx->opad_tmp_keys_buff,
			       sizeof(ctx->opad_tmp_keys_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;
	return 0;

fail:
	cc_free_ctx(ctx);
	return -ENOMEM;
}

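/*
 * SM3 uses a fixed 8-byte running-length field (CC_SM3_HASH_LEN_SIZE); for
 * the other modes the field width depends on the hardware revision, so it
 * is taken from the driver data.
 */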
static int cc_get_hash_len(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	if (ctx->hash_mode == DRV_HASH_SM3)
		return CC_SM3_HASH_LEN_SIZE;
	else
		return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_cra_init(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct cc_hash_alg *cc_alg =
		container_of(ahash_alg, struct cc_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct ahash_req_ctx));

	ctx->hash_mode = cc_alg->hash_mode;
	ctx->hw_mode = cc_alg->hw_mode;
	ctx->inter_digestsize = cc_alg->inter_digestsize;
	ctx->drvdata = cc_alg->drvdata;
	ctx->hash_len = cc_get_hash_len(tfm);
	return cc_alloc_ctx(ctx);
}

static void cc_cra_exit(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "cc_cra_exit");
	cc_free_ctx(ctx);
}

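/*
 * XCBC/CMAC update path: mirrors cc_hash_update() but runs through the AES
 * engine; state->xcbc_count records how many updates have executed so the
 * final step can tell whether any data was ever processed.
 */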
static int cc_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int rc;
	u32 idx = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
					req->nbytes, block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size %x doesn't require HW update\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	u32 rem_cnt = *cc_hash_buf_cnt(state);

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 0, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

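	/* If updates ran and the message ended block-aligned (no buffered
	 * remainder), the stored state is the encryption of the last block;
	 * it is ECB-decrypted in place back to block_state XOR M[n] (see the
	 * comment below) so the engine can redo the final block correctly.
	 */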
	if (state->xcbc_count && rem_cnt == 0) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
			     key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous
		 * block_state-XOR-M[n]
		 */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to cc_mac_final\n");
		return cc_mac_final(req);
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
1419 | |
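/**
 * cc_mac_digest() - Compute an XCBC-MAC or CMAC over the whole request.
 * @req: The async hash request
 *
 * One-shot variant: initializes the request state, maps all buffers and
 * queues a single descriptor sequence that processes the entire message and
 * writes the MAC result back.
 *
 * Return: -EINPROGRESS or -EBUSY if the request was queued, a negative
 * error code otherwise.
 */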
1420 | static int cc_mac_digest(struct ahash_request *req) |
1421 | { |
1422 | struct ahash_req_ctx *state = ahash_request_ctx_dma(req); |
1423 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
1424 | struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); |
	struct device *dev = drvdata_to_dev(ctx->drvdata);
1426 | u32 digestsize = crypto_ahash_digestsize(tfm); |
1427 | struct cc_crypto_req cc_req = {}; |
1428 | struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; |
1429 | u32 key_len; |
1430 | unsigned int idx = 0; |
1431 | int rc; |
	gfp_t flags = cc_gfp_flags(&req->base);
1433 | |
1434 | dev_dbg(dev, "===== -digest mac (%d) ====\n" , req->nbytes); |
1435 | |
1436 | cc_init_req(dev, state, ctx); |
1437 | |
1438 | if (cc_map_req(dev, state, ctx)) { |
1439 | dev_err(dev, "map_ahash_source() failed\n" ); |
1440 | return -ENOMEM; |
1441 | } |
1442 | if (cc_map_result(dev, state, digestsize)) { |
1443 | dev_err(dev, "map_ahash_digest() failed\n" ); |
1444 | cc_unmap_req(dev, state, ctx); |
1445 | return -ENOMEM; |
1446 | } |
1447 | |
	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
1451 | cc_unmap_req(dev, state, ctx); |
1452 | return -ENOMEM; |
1453 | } |
1454 | |
1455 | /* Setup request structure */ |
1456 | cc_req.user_cb = cc_digest_complete; |
1457 | cc_req.user_arg = req; |
1458 | |
1459 | if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { |
1460 | key_len = CC_AES_128_BIT_KEY_SIZE; |
		cc_setup_xcbc(req, desc, &idx);
1462 | } else { |
1463 | key_len = ctx->key_params.keylen; |
		cc_setup_cmac(req, desc, &idx);
1465 | } |
1466 | |
1467 | if (req->nbytes == 0) { |
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
1473 | idx++; |
1474 | } else { |
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1476 | } |
1477 | |
1478 | /* Get final MAC result */ |
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
1487 | idx++; |
1488 | |
	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1490 | if (rc != -EINPROGRESS && rc != -EBUSY) { |
1491 | dev_err(dev, "send_request() failed (rc=%d)\n" , rc); |
1492 | cc_unmap_hash_request(dev, ctx: state, src: req->src, do_revert: true); |
1493 | cc_unmap_result(dev, state, digestsize, result: req->result); |
1494 | cc_unmap_req(dev, state, ctx); |
1495 | } |
1496 | return rc; |
1497 | } |
1498 | |
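/**
 * cc_hash_export() - Export the intermediate hash state.
 * @req: The async hash request
 * @out: Buffer receiving the opaque state blob
 *
 * The blob consists of a magic marker, the intermediate digest, the hashed
 * length counter, the buffered byte count and the buffered partial block.
 *
 * Return: 0 on success.
 */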
1499 | static int cc_hash_export(struct ahash_request *req, void *out) |
1500 | { |
1501 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1503 | struct ahash_req_ctx *state = ahash_request_ctx_dma(req); |
1504 | u8 *curr_buff = cc_hash_buf(state); |
1505 | u32 curr_buff_cnt = *cc_hash_buf_cnt(state); |
1506 | const u32 tmp = CC_EXPORT_MAGIC; |
1507 | |
1508 | memcpy(out, &tmp, sizeof(u32)); |
1509 | out += sizeof(u32); |
1510 | |
1511 | memcpy(out, state->digest_buff, ctx->inter_digestsize); |
1512 | out += ctx->inter_digestsize; |
1513 | |
1514 | memcpy(out, state->digest_bytes_len, ctx->hash_len); |
1515 | out += ctx->hash_len; |
1516 | |
1517 | memcpy(out, &curr_buff_cnt, sizeof(u32)); |
1518 | out += sizeof(u32); |
1519 | |
1520 | memcpy(out, curr_buff, curr_buff_cnt); |
1521 | |
1522 | return 0; |
1523 | } |
1524 | |
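/**
 * cc_hash_import() - Import a hash state produced by cc_hash_export().
 * @req: The async hash request
 * @in: The opaque state blob to import
 *
 * Return: 0 on success, -EINVAL if the magic marker does not match or the
 * buffered byte count exceeds CC_MAX_HASH_BLCK_SIZE.
 */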
1525 | static int cc_hash_import(struct ahash_request *req, const void *in) |
1526 | { |
1527 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
1530 | struct ahash_req_ctx *state = ahash_request_ctx_dma(req); |
1531 | u32 tmp; |
1532 | |
1533 | memcpy(&tmp, in, sizeof(u32)); |
1534 | if (tmp != CC_EXPORT_MAGIC) |
1535 | return -EINVAL; |
1536 | in += sizeof(u32); |
1537 | |
1538 | cc_init_req(dev, state, ctx); |
1539 | |
1540 | memcpy(state->digest_buff, in, ctx->inter_digestsize); |
1541 | in += ctx->inter_digestsize; |
1542 | |
1543 | memcpy(state->digest_bytes_len, in, ctx->hash_len); |
1544 | in += ctx->hash_len; |
1545 | |
1546 | /* Sanity check the data as much as possible */ |
1547 | memcpy(&tmp, in, sizeof(u32)); |
1548 | if (tmp > CC_MAX_HASH_BLCK_SIZE) |
1549 | return -EINVAL; |
1550 | in += sizeof(u32); |
1551 | |
1552 | state->buf_cnt[0] = tmp; |
1553 | memcpy(state->buffers[0], in, tmp); |
1554 | |
1555 | return 0; |
1556 | } |
1557 | |
1558 | struct cc_hash_template { |
1559 | char name[CRYPTO_MAX_ALG_NAME]; |
1560 | char driver_name[CRYPTO_MAX_ALG_NAME]; |
1561 | char mac_name[CRYPTO_MAX_ALG_NAME]; |
1562 | char mac_driver_name[CRYPTO_MAX_ALG_NAME]; |
1563 | unsigned int blocksize; |
1564 | bool is_mac; |
1565 | bool synchronize; |
1566 | struct ahash_alg template_ahash; |
1567 | int hash_mode; |
1568 | int hw_mode; |
1569 | int inter_digestsize; |
1570 | struct cc_drvdata *drvdata; |
1571 | u32 min_hw_rev; |
1572 | enum cc_std_body std_body; |
1573 | }; |
1574 | |
1575 | #define CC_STATE_SIZE(_x) \ |
1576 | ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32))) |
1577 | |
1578 | /* hash descriptors */ |
1579 | static struct cc_hash_template driver_hash[] = { |
	//Asynchronous hash templates
1581 | { |
1582 | .name = "sha1" , |
1583 | .driver_name = "sha1-ccree" , |
1584 | .mac_name = "hmac(sha1)" , |
1585 | .mac_driver_name = "hmac-sha1-ccree" , |
1586 | .blocksize = SHA1_BLOCK_SIZE, |
1587 | .is_mac = true, |
1588 | .synchronize = false, |
1589 | .template_ahash = { |
1590 | .init = cc_hash_init, |
1591 | .update = cc_hash_update, |
1592 | .final = cc_hash_final, |
1593 | .finup = cc_hash_finup, |
1594 | .digest = cc_hash_digest, |
1595 | .export = cc_hash_export, |
1596 | .import = cc_hash_import, |
1597 | .setkey = cc_hash_setkey, |
1598 | .halg = { |
1599 | .digestsize = SHA1_DIGEST_SIZE, |
1600 | .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE), |
1601 | }, |
1602 | }, |
1603 | .hash_mode = DRV_HASH_SHA1, |
1604 | .hw_mode = DRV_HASH_HW_SHA1, |
1605 | .inter_digestsize = SHA1_DIGEST_SIZE, |
1606 | .min_hw_rev = CC_HW_REV_630, |
1607 | .std_body = CC_STD_NIST, |
1608 | }, |
1609 | { |
1610 | .name = "sha256" , |
1611 | .driver_name = "sha256-ccree" , |
1612 | .mac_name = "hmac(sha256)" , |
1613 | .mac_driver_name = "hmac-sha256-ccree" , |
1614 | .blocksize = SHA256_BLOCK_SIZE, |
1615 | .is_mac = true, |
1616 | .template_ahash = { |
1617 | .init = cc_hash_init, |
1618 | .update = cc_hash_update, |
1619 | .final = cc_hash_final, |
1620 | .finup = cc_hash_finup, |
1621 | .digest = cc_hash_digest, |
1622 | .export = cc_hash_export, |
1623 | .import = cc_hash_import, |
1624 | .setkey = cc_hash_setkey, |
1625 | .halg = { |
1626 | .digestsize = SHA256_DIGEST_SIZE, |
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1628 | }, |
1629 | }, |
1630 | .hash_mode = DRV_HASH_SHA256, |
1631 | .hw_mode = DRV_HASH_HW_SHA256, |
1632 | .inter_digestsize = SHA256_DIGEST_SIZE, |
1633 | .min_hw_rev = CC_HW_REV_630, |
1634 | .std_body = CC_STD_NIST, |
1635 | }, |
1636 | { |
1637 | .name = "sha224" , |
1638 | .driver_name = "sha224-ccree" , |
1639 | .mac_name = "hmac(sha224)" , |
1640 | .mac_driver_name = "hmac-sha224-ccree" , |
1641 | .blocksize = SHA224_BLOCK_SIZE, |
1642 | .is_mac = true, |
1643 | .template_ahash = { |
1644 | .init = cc_hash_init, |
1645 | .update = cc_hash_update, |
1646 | .final = cc_hash_final, |
1647 | .finup = cc_hash_finup, |
1648 | .digest = cc_hash_digest, |
1649 | .export = cc_hash_export, |
1650 | .import = cc_hash_import, |
1651 | .setkey = cc_hash_setkey, |
1652 | .halg = { |
1653 | .digestsize = SHA224_DIGEST_SIZE, |
1654 | .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE), |
1655 | }, |
1656 | }, |
1657 | .hash_mode = DRV_HASH_SHA224, |
1658 | .hw_mode = DRV_HASH_HW_SHA256, |
1659 | .inter_digestsize = SHA256_DIGEST_SIZE, |
1660 | .min_hw_rev = CC_HW_REV_630, |
1661 | .std_body = CC_STD_NIST, |
1662 | }, |
1663 | { |
1664 | .name = "sha384" , |
1665 | .driver_name = "sha384-ccree" , |
1666 | .mac_name = "hmac(sha384)" , |
1667 | .mac_driver_name = "hmac-sha384-ccree" , |
1668 | .blocksize = SHA384_BLOCK_SIZE, |
1669 | .is_mac = true, |
1670 | .template_ahash = { |
1671 | .init = cc_hash_init, |
1672 | .update = cc_hash_update, |
1673 | .final = cc_hash_final, |
1674 | .finup = cc_hash_finup, |
1675 | .digest = cc_hash_digest, |
1676 | .export = cc_hash_export, |
1677 | .import = cc_hash_import, |
1678 | .setkey = cc_hash_setkey, |
1679 | .halg = { |
1680 | .digestsize = SHA384_DIGEST_SIZE, |
1681 | .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE), |
1682 | }, |
1683 | }, |
1684 | .hash_mode = DRV_HASH_SHA384, |
1685 | .hw_mode = DRV_HASH_HW_SHA512, |
1686 | .inter_digestsize = SHA512_DIGEST_SIZE, |
1687 | .min_hw_rev = CC_HW_REV_712, |
1688 | .std_body = CC_STD_NIST, |
1689 | }, |
1690 | { |
1691 | .name = "sha512" , |
1692 | .driver_name = "sha512-ccree" , |
1693 | .mac_name = "hmac(sha512)" , |
1694 | .mac_driver_name = "hmac-sha512-ccree" , |
1695 | .blocksize = SHA512_BLOCK_SIZE, |
1696 | .is_mac = true, |
1697 | .template_ahash = { |
1698 | .init = cc_hash_init, |
1699 | .update = cc_hash_update, |
1700 | .final = cc_hash_final, |
1701 | .finup = cc_hash_finup, |
1702 | .digest = cc_hash_digest, |
1703 | .export = cc_hash_export, |
1704 | .import = cc_hash_import, |
1705 | .setkey = cc_hash_setkey, |
1706 | .halg = { |
1707 | .digestsize = SHA512_DIGEST_SIZE, |
1708 | .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE), |
1709 | }, |
1710 | }, |
1711 | .hash_mode = DRV_HASH_SHA512, |
1712 | .hw_mode = DRV_HASH_HW_SHA512, |
1713 | .inter_digestsize = SHA512_DIGEST_SIZE, |
1714 | .min_hw_rev = CC_HW_REV_712, |
1715 | .std_body = CC_STD_NIST, |
1716 | }, |
1717 | { |
1718 | .name = "md5" , |
1719 | .driver_name = "md5-ccree" , |
1720 | .mac_name = "hmac(md5)" , |
1721 | .mac_driver_name = "hmac-md5-ccree" , |
1722 | .blocksize = MD5_HMAC_BLOCK_SIZE, |
1723 | .is_mac = true, |
1724 | .template_ahash = { |
1725 | .init = cc_hash_init, |
1726 | .update = cc_hash_update, |
1727 | .final = cc_hash_final, |
1728 | .finup = cc_hash_finup, |
1729 | .digest = cc_hash_digest, |
1730 | .export = cc_hash_export, |
1731 | .import = cc_hash_import, |
1732 | .setkey = cc_hash_setkey, |
1733 | .halg = { |
1734 | .digestsize = MD5_DIGEST_SIZE, |
1735 | .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE), |
1736 | }, |
1737 | }, |
1738 | .hash_mode = DRV_HASH_MD5, |
1739 | .hw_mode = DRV_HASH_HW_MD5, |
1740 | .inter_digestsize = MD5_DIGEST_SIZE, |
1741 | .min_hw_rev = CC_HW_REV_630, |
1742 | .std_body = CC_STD_NIST, |
1743 | }, |
1744 | { |
1745 | .name = "sm3" , |
1746 | .driver_name = "sm3-ccree" , |
1747 | .blocksize = SM3_BLOCK_SIZE, |
1748 | .is_mac = false, |
1749 | .template_ahash = { |
1750 | .init = cc_hash_init, |
1751 | .update = cc_hash_update, |
1752 | .final = cc_hash_final, |
1753 | .finup = cc_hash_finup, |
1754 | .digest = cc_hash_digest, |
1755 | .export = cc_hash_export, |
1756 | .import = cc_hash_import, |
1757 | .setkey = cc_hash_setkey, |
1758 | .halg = { |
1759 | .digestsize = SM3_DIGEST_SIZE, |
1760 | .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE), |
1761 | }, |
1762 | }, |
1763 | .hash_mode = DRV_HASH_SM3, |
1764 | .hw_mode = DRV_HASH_HW_SM3, |
1765 | .inter_digestsize = SM3_DIGEST_SIZE, |
1766 | .min_hw_rev = CC_HW_REV_713, |
1767 | .std_body = CC_STD_OSCCA, |
1768 | }, |
1769 | { |
1770 | .mac_name = "xcbc(aes)" , |
1771 | .mac_driver_name = "xcbc-aes-ccree" , |
1772 | .blocksize = AES_BLOCK_SIZE, |
1773 | .is_mac = true, |
1774 | .template_ahash = { |
1775 | .init = cc_hash_init, |
1776 | .update = cc_mac_update, |
1777 | .final = cc_mac_final, |
1778 | .finup = cc_mac_finup, |
1779 | .digest = cc_mac_digest, |
1780 | .setkey = cc_xcbc_setkey, |
1781 | .export = cc_hash_export, |
1782 | .import = cc_hash_import, |
1783 | .halg = { |
1784 | .digestsize = AES_BLOCK_SIZE, |
1785 | .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE), |
1786 | }, |
1787 | }, |
1788 | .hash_mode = DRV_HASH_NULL, |
1789 | .hw_mode = DRV_CIPHER_XCBC_MAC, |
1790 | .inter_digestsize = AES_BLOCK_SIZE, |
1791 | .min_hw_rev = CC_HW_REV_630, |
1792 | .std_body = CC_STD_NIST, |
1793 | }, |
1794 | { |
1795 | .mac_name = "cmac(aes)" , |
1796 | .mac_driver_name = "cmac-aes-ccree" , |
1797 | .blocksize = AES_BLOCK_SIZE, |
1798 | .is_mac = true, |
1799 | .template_ahash = { |
1800 | .init = cc_hash_init, |
1801 | .update = cc_mac_update, |
1802 | .final = cc_mac_final, |
1803 | .finup = cc_mac_finup, |
1804 | .digest = cc_mac_digest, |
1805 | .setkey = cc_cmac_setkey, |
1806 | .export = cc_hash_export, |
1807 | .import = cc_hash_import, |
1808 | .halg = { |
1809 | .digestsize = AES_BLOCK_SIZE, |
1810 | .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE), |
1811 | }, |
1812 | }, |
1813 | .hash_mode = DRV_HASH_NULL, |
1814 | .hw_mode = DRV_CIPHER_CMAC, |
1815 | .inter_digestsize = AES_BLOCK_SIZE, |
1816 | .min_hw_rev = CC_HW_REV_630, |
1817 | .std_body = CC_STD_NIST, |
1818 | }, |
1819 | }; |
1820 | |
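/**
 * cc_alloc_hash_alg() - Instantiate an ahash algorithm from a template.
 * @template: The hash template to instantiate
 * @dev: Associated device
 * @keyed: If true, use the keyed (HMAC/MAC) names and keep the setkey
 *	   handler; otherwise use the plain hash names
 *
 * Return: The newly allocated algorithm descriptor, or an ERR_PTR() value
 * on allocation failure.
 */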
1821 | static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template, |
1822 | struct device *dev, bool keyed) |
1823 | { |
1824 | struct cc_hash_alg *t_crypto_alg; |
1825 | struct crypto_alg *alg; |
1826 | struct ahash_alg *halg; |
1827 | |
	t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
1829 | if (!t_crypto_alg) |
		return ERR_PTR(-ENOMEM);
1831 | |
1832 | t_crypto_alg->ahash_alg = template->template_ahash; |
1833 | halg = &t_crypto_alg->ahash_alg; |
1834 | alg = &halg->halg.base; |
1835 | |
1836 | if (keyed) { |
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_driver_name);
1841 | } else { |
1842 | halg->setkey = NULL; |
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
1847 | } |
1848 | alg->cra_module = THIS_MODULE; |
1849 | alg->cra_ctxsize = sizeof(struct cc_hash_ctx) + crypto_dma_padding(); |
1850 | alg->cra_priority = CC_CRA_PRIO; |
1851 | alg->cra_blocksize = template->blocksize; |
1852 | alg->cra_alignmask = 0; |
1853 | alg->cra_exit = cc_cra_exit; |
1854 | |
1855 | alg->cra_init = cc_cra_init; |
1856 | alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; |
1857 | |
1858 | t_crypto_alg->hash_mode = template->hash_mode; |
1859 | t_crypto_alg->hw_mode = template->hw_mode; |
1860 | t_crypto_alg->inter_digestsize = template->inter_digestsize; |
1861 | |
1862 | return t_crypto_alg; |
1863 | } |
1864 | |
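/**
 * cc_init_copy_sram() - Copy a constant array into the device SRAM.
 * @drvdata: Associated device driver context
 * @data: The constant array to copy
 * @size: Size of the array in bytes
 * @sram_buff_ofs: Current SRAM offset, advanced by @size on success
 *
 * Return: 0 on success, a negative error code otherwise.
 */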
1865 | static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data, |
1866 | unsigned int size, u32 *sram_buff_ofs) |
1867 | { |
1868 | struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)]; |
1869 | unsigned int larval_seq_len = 0; |
1870 | int rc; |
1871 | |
	cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
			 larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1875 | if (rc) |
1876 | return rc; |
1877 | |
1878 | *sram_buff_ofs += size; |
1879 | return 0; |
1880 | } |
1881 | |
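/**
 * cc_init_hash_sram() - Copy the digest-length constants and the larval
 * digests into the device SRAM.
 * @drvdata: Associated device driver context
 *
 * The SHA384/SHA512 and SM3 constants are only copied when the hardware
 * revision supports them, matching the layout assumed by
 * cc_larval_digest_addr() and cc_digest_len_addr().
 *
 * Return: 0 on success, a negative error code otherwise.
 */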
1882 | int cc_init_hash_sram(struct cc_drvdata *drvdata) |
1883 | { |
1884 | struct cc_hash_handle *hash_handle = drvdata->hash_handle; |
1885 | u32 sram_buff_ofs = hash_handle->digest_len_sram_addr; |
1886 | bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712); |
1887 | bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713); |
1888 | int rc = 0; |
1889 | |
1890 | /* Copy-to-sram digest-len */ |
	rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
			       sizeof(cc_digest_len_init), &sram_buff_ofs);
1893 | if (rc) |
1894 | goto init_digest_const_err; |
1895 | |
1896 | if (large_sha_supported) { |
1897 | /* Copy-to-sram digest-len for sha384/512 */ |
		rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
				       sizeof(cc_digest_len_sha512_init),
				       &sram_buff_ofs);
1901 | if (rc) |
1902 | goto init_digest_const_err; |
1903 | } |
1904 | |
1905 | /* The initial digests offset */ |
1906 | hash_handle->larval_digest_sram_addr = sram_buff_ofs; |
1907 | |
1908 | /* Copy-to-sram initial SHA* digests */ |
	rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
			       &sram_buff_ofs);
1911 | if (rc) |
1912 | goto init_digest_const_err; |
1913 | |
	rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
			       &sram_buff_ofs);
1916 | if (rc) |
1917 | goto init_digest_const_err; |
1918 | |
	rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
			       &sram_buff_ofs);
1921 | if (rc) |
1922 | goto init_digest_const_err; |
1923 | |
	rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
			       &sram_buff_ofs);
1926 | if (rc) |
1927 | goto init_digest_const_err; |
1928 | |
1929 | if (sm3_supported) { |
		rc = cc_init_copy_sram(drvdata, cc_sm3_init,
				       sizeof(cc_sm3_init), &sram_buff_ofs);
1932 | if (rc) |
1933 | goto init_digest_const_err; |
1934 | } |
1935 | |
1936 | if (large_sha_supported) { |
		rc = cc_init_copy_sram(drvdata, cc_sha384_init,
				       sizeof(cc_sha384_init), &sram_buff_ofs);
1939 | if (rc) |
1940 | goto init_digest_const_err; |
1941 | |
		rc = cc_init_copy_sram(drvdata, cc_sha512_init,
				       sizeof(cc_sha512_init), &sram_buff_ofs);
1944 | if (rc) |
1945 | goto init_digest_const_err; |
1946 | } |
1947 | |
1948 | init_digest_const_err: |
1949 | return rc; |
1950 | } |
1951 | |
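/**
 * cc_hash_alloc() - Allocate the hash handle, load the SRAM constants and
 * register the hash algorithms supported by this hardware revision.
 * @drvdata: Associated device driver context
 *
 * Return: 0 on success, a negative error code otherwise.
 */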
1952 | int cc_hash_alloc(struct cc_drvdata *drvdata) |
1953 | { |
1954 | struct cc_hash_handle *hash_handle; |
1955 | u32 sram_buff; |
1956 | u32 sram_size_to_alloc; |
1957 | struct device *dev = drvdata_to_dev(drvdata); |
1958 | int rc = 0; |
1959 | int alg; |
1960 | |
	hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
1962 | if (!hash_handle) |
1963 | return -ENOMEM; |
1964 | |
	INIT_LIST_HEAD(&hash_handle->hash_list);
1966 | drvdata->hash_handle = hash_handle; |
1967 | |
1968 | sram_size_to_alloc = sizeof(cc_digest_len_init) + |
1969 | sizeof(cc_md5_init) + |
1970 | sizeof(cc_sha1_init) + |
1971 | sizeof(cc_sha224_init) + |
1972 | sizeof(cc_sha256_init); |
1973 | |
1974 | if (drvdata->hw_rev >= CC_HW_REV_713) |
1975 | sram_size_to_alloc += sizeof(cc_sm3_init); |
1976 | |
1977 | if (drvdata->hw_rev >= CC_HW_REV_712) |
1978 | sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) + |
1979 | sizeof(cc_sha384_init) + sizeof(cc_sha512_init); |
1980 | |
	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1982 | if (sram_buff == NULL_SRAM_ADDR) { |
1983 | rc = -ENOMEM; |
1984 | goto fail; |
1985 | } |
1986 | |
1987 | /* The initial digest-len offset */ |
1988 | hash_handle->digest_len_sram_addr = sram_buff; |
1989 | |
	/* Must be set before the alg registration as it is used there */
1991 | rc = cc_init_hash_sram(drvdata); |
1992 | if (rc) { |
1993 | dev_err(dev, "Init digest CONST failed (rc=%d)\n" , rc); |
1994 | goto fail; |
1995 | } |
1996 | |
1997 | /* ahash registration */ |
1998 | for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) { |
1999 | struct cc_hash_alg *t_alg; |
2000 | int hw_mode = driver_hash[alg].hw_mode; |
2001 | |
2002 | /* Check that the HW revision and variants are suitable */ |
2003 | if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) || |
2004 | !(drvdata->std_bodies & driver_hash[alg].std_body)) |
2005 | continue; |
2006 | |
2007 | if (driver_hash[alg].is_mac) { |
2008 | /* register hmac version */ |
			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
			if (IS_ERR(t_alg)) {
				rc = PTR_ERR(t_alg);
				dev_err(dev, "%s alg allocation failed\n",
2013 | driver_hash[alg].driver_name); |
2014 | goto fail; |
2015 | } |
2016 | t_alg->drvdata = drvdata; |
2017 | |
			rc = crypto_register_ahash(&t_alg->ahash_alg);
			if (rc) {
				dev_err(dev, "%s alg registration failed\n",
2021 | driver_hash[alg].driver_name); |
2022 | goto fail; |
2023 | } |
2024 | |
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2026 | } |
2027 | if (hw_mode == DRV_CIPHER_XCBC_MAC || |
2028 | hw_mode == DRV_CIPHER_CMAC) |
2029 | continue; |
2030 | |
2031 | /* register hash version */ |
		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
2036 | driver_hash[alg].driver_name); |
2037 | goto fail; |
2038 | } |
2039 | t_alg->drvdata = drvdata; |
2040 | |
		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
2044 | driver_hash[alg].driver_name); |
2045 | goto fail; |
2046 | } |
2047 | |
		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2049 | } |
2050 | |
2051 | return 0; |
2052 | |
2053 | fail: |
2054 | cc_hash_free(drvdata); |
2055 | return rc; |
2056 | } |
2057 | |
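/**
 * cc_hash_free() - Unregister all hash algorithms registered by
 * cc_hash_alloc().
 * @drvdata: Associated device driver context
 *
 * Return: 0.
 */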
2058 | int cc_hash_free(struct cc_drvdata *drvdata) |
2059 | { |
2060 | struct cc_hash_alg *t_hash_alg, *hash_n; |
2061 | struct cc_hash_handle *hash_handle = drvdata->hash_handle; |
2062 | |
2063 | list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, |
2064 | entry) { |
		crypto_unregister_ahash(&t_hash_alg->ahash_alg);
		list_del(&t_hash_alg->entry);
2067 | } |
2068 | |
2069 | return 0; |
2070 | } |
2071 | |
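/**
 * cc_setup_xcbc() - Build the descriptors that load the derived XCBC-MAC
 * keys K1/K2/K3 and the current MAC state into the AES engine.
 * @areq: The async hash request
 * @desc: The descriptor array to append to
 * @seq_size: Current sequence length, updated with the descriptors added
 */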
2072 | static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[], |
2073 | unsigned int *seq_size) |
2074 | { |
2075 | unsigned int idx = *seq_size; |
	struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2078 | struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); |
2079 | |
2080 | /* Setup XCBC MAC K1 */ |
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K1_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
2090 | idx++; |
2091 | |
2092 | /* Setup XCBC MAC K2 */ |
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
2102 | idx++; |
2103 | |
2104 | /* Setup XCBC MAC K3 */ |
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
2114 | idx++; |
2115 | |
2116 | /* Loading MAC state */ |
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
2125 | idx++; |
2126 | *seq_size = idx; |
2127 | } |
2128 | |
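/**
 * cc_setup_cmac() - Build the descriptors that load the CMAC key and the
 * current MAC state into the AES engine.
 * @areq: The async hash request
 * @desc: The descriptor array to append to
 * @seq_size: Current sequence length, updated with the descriptors added
 */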
2129 | static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[], |
2130 | unsigned int *seq_size) |
2131 | { |
2132 | unsigned int idx = *seq_size; |
	struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2135 | struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); |
2136 | |
2137 | /* Setup CMAC Key */ |
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
		      ctx->key_params.keylen), NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
2147 | idx++; |
2148 | |
2149 | /* Load MAC state */ |
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
2158 | idx++; |
2159 | *seq_size = idx; |
2160 | } |
2161 | |
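/**
 * cc_set_desc() - Build the data-processing descriptors for a request.
 * @areq_ctx: The request context
 * @ctx: The hash session context
 * @flow_mode: The HW flow mode for the data
 * @desc: The descriptor array to append to
 * @is_not_last_data: True if more data follows in a later sequence
 * @seq_size: Current sequence length, updated with the descriptors added
 *
 * A DLLI buffer is fed to the engine with a single DIN descriptor; an MLLI
 * buffer needs its table copied into SRAM with a BYPASS descriptor first.
 */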
2162 | static void cc_set_desc(struct ahash_req_ctx *areq_ctx, |
2163 | struct cc_hash_ctx *ctx, unsigned int flow_mode, |
2164 | struct cc_hw_desc desc[], bool is_not_last_data, |
2165 | unsigned int *seq_size) |
2166 | { |
2167 | unsigned int idx = *seq_size; |
	struct device *dev = drvdata_to_dev(ctx->drvdata);
2169 | |
2170 | if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) { |
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(areq_ctx->curr_sg),
			     areq_ctx->curr_sg->length, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
2176 | idx++; |
2177 | } else { |
2178 | if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { |
2179 | dev_dbg(dev, " NULL mode\n" ); |
2180 | /* nothing to build */ |
2181 | return; |
2182 | } |
2183 | /* bypass */ |
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     areq_ctx->mlli_params.mlli_dma_addr,
			     areq_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
			      areq_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[idx], BYPASS);
2191 | idx++; |
2192 | /* process */ |
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI,
			     ctx->drvdata->mlli_sram_addr,
			     areq_ctx->mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
2198 | idx++; |
2199 | } |
2200 | if (is_not_last_data) |
2201 | set_din_not_last_indication(&desc[(idx - 1)]); |
2202 | /* return updated desc sequence size */ |
2203 | *seq_size = idx; |
2204 | } |
2205 | |
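/**
 * cc_larval_digest() - Get the initial (larval) digest constants for the
 * given hash mode.
 * @dev: Associated device
 * @mode: The hash mode
 *
 * Return: Pointer to the larval digest array. An invalid mode is reported
 * and the MD5 constants are returned to avoid a crash.
 */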
2206 | static const void *cc_larval_digest(struct device *dev, u32 mode) |
2207 | { |
2208 | switch (mode) { |
2209 | case DRV_HASH_MD5: |
2210 | return cc_md5_init; |
2211 | case DRV_HASH_SHA1: |
2212 | return cc_sha1_init; |
2213 | case DRV_HASH_SHA224: |
2214 | return cc_sha224_init; |
2215 | case DRV_HASH_SHA256: |
2216 | return cc_sha256_init; |
2217 | case DRV_HASH_SHA384: |
2218 | return cc_sha384_init; |
2219 | case DRV_HASH_SHA512: |
2220 | return cc_sha512_init; |
2221 | case DRV_HASH_SM3: |
2222 | return cc_sm3_init; |
2223 | default: |
2224 | dev_err(dev, "Invalid hash mode (%d)\n" , mode); |
2225 | return cc_md5_init; |
2226 | } |
2227 | } |
2228 | |
2229 | /** |
2230 | * cc_larval_digest_addr() - Get the address of the initial digest in SRAM |
2231 | * according to the given hash mode |
2232 | * |
2233 | * @drvdata: Associated device driver context |
 * @mode: The hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
2235 | * |
2236 | * Return: |
2237 | * The address of the initial digest in SRAM |
2238 | */ |
2239 | u32 cc_larval_digest_addr(void *drvdata, u32 mode) |
2240 | { |
2241 | struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata; |
2242 | struct cc_hash_handle *hash_handle = _drvdata->hash_handle; |
	struct device *dev = drvdata_to_dev(_drvdata);
2244 | bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713); |
2245 | u32 addr; |
2246 | |
2247 | switch (mode) { |
2248 | case DRV_HASH_NULL: |
2249 | break; /*Ignore*/ |
2250 | case DRV_HASH_MD5: |
2251 | return (hash_handle->larval_digest_sram_addr); |
2252 | case DRV_HASH_SHA1: |
2253 | return (hash_handle->larval_digest_sram_addr + |
2254 | sizeof(cc_md5_init)); |
2255 | case DRV_HASH_SHA224: |
2256 | return (hash_handle->larval_digest_sram_addr + |
2257 | sizeof(cc_md5_init) + |
2258 | sizeof(cc_sha1_init)); |
2259 | case DRV_HASH_SHA256: |
2260 | return (hash_handle->larval_digest_sram_addr + |
2261 | sizeof(cc_md5_init) + |
2262 | sizeof(cc_sha1_init) + |
2263 | sizeof(cc_sha224_init)); |
2264 | case DRV_HASH_SM3: |
2265 | return (hash_handle->larval_digest_sram_addr + |
2266 | sizeof(cc_md5_init) + |
2267 | sizeof(cc_sha1_init) + |
2268 | sizeof(cc_sha224_init) + |
2269 | sizeof(cc_sha256_init)); |
2270 | case DRV_HASH_SHA384: |
2271 | addr = (hash_handle->larval_digest_sram_addr + |
2272 | sizeof(cc_md5_init) + |
2273 | sizeof(cc_sha1_init) + |
2274 | sizeof(cc_sha224_init) + |
2275 | sizeof(cc_sha256_init)); |
2276 | if (sm3_supported) |
2277 | addr += sizeof(cc_sm3_init); |
2278 | return addr; |
2279 | case DRV_HASH_SHA512: |
2280 | addr = (hash_handle->larval_digest_sram_addr + |
2281 | sizeof(cc_md5_init) + |
2282 | sizeof(cc_sha1_init) + |
2283 | sizeof(cc_sha224_init) + |
2284 | sizeof(cc_sha256_init) + |
2285 | sizeof(cc_sha384_init)); |
2286 | if (sm3_supported) |
2287 | addr += sizeof(cc_sm3_init); |
2288 | return addr; |
2289 | default: |
2290 | dev_err(dev, "Invalid hash mode (%d)\n" , mode); |
2291 | } |
2292 | |
	/* Return a valid but wrong value to avoid a kernel crash */
2294 | return hash_handle->larval_digest_sram_addr; |
2295 | } |
2296 | |
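/**
 * cc_digest_len_addr() - Get the SRAM address of the initial digest-length
 * value for the given hash mode.
 * @drvdata: Associated device driver context
 * @mode: The hash mode
 *
 * Return: The SRAM address of the digest-length constant for @mode;
 * SHA384/SHA512 use the constant stored after the default one, and unknown
 * modes fall back to the default address.
 */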
2297 | u32 cc_digest_len_addr(void *drvdata, u32 mode) |
2298 | { |
2299 | struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata; |
2300 | struct cc_hash_handle *hash_handle = _drvdata->hash_handle; |
2301 | u32 digest_len_addr = hash_handle->digest_len_sram_addr; |
2302 | |
2303 | switch (mode) { |
2304 | case DRV_HASH_SHA1: |
2305 | case DRV_HASH_SHA224: |
2306 | case DRV_HASH_SHA256: |
2307 | case DRV_HASH_MD5: |
2308 | return digest_len_addr; |
2309 | case DRV_HASH_SHA384: |
2310 | case DRV_HASH_SHA512: |
2311 | return digest_len_addr + sizeof(cc_digest_len_init); |
2312 | default: |
		return digest_len_addr; /* to avoid a kernel crash */
2314 | } |
2315 | } |
2316 | |