// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_pm.h"

#define CC_MAX_POLL_ITER 10
/* The highest descriptor count in use */
#define CC_MAX_DESC_SEQ_LEN 23

struct cc_req_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	/* This lock serializes access to the HW registers,
	 * which must be driven by a single request at a time
	 */
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;

	/* backlog queue */
	struct list_head backlog;
	unsigned int bl_len;
	spinlock_t bl_lock; /* protect backlog queue */

#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
};

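/*
 * A backlogged request: a private snapshot of the crypto request and its
 * descriptor sequence, held on the backlog list until the HW queue has
 * room for it.
 */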
struct cc_bl_item {
	struct cc_crypto_req creq;
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len;
	struct list_head list;
	bool notif;
};

static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

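/*
 * Return the "operation aborted" interrupt mask for a given CPP alg/slot.
 * The indices are clamped with array_index_nospec() so that speculative
 * execution cannot read outside cc_cpp_int_masks[] (Spectre-v1 hardening).
 */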
static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
{
	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);

	return cc_cpp_int_masks[alg][slot];
}

void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma) {
		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						req_mgr_h->min_free_hw_slots));
	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	kfree_sensitive(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
	spin_lock_init(&req_mgr_h->bl_lock);
	INIT_LIST_HEAD(&req_mgr_h->backlog);

#ifdef COMP_IN_WQ
	dev_dbg(dev, "Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("ccree");
	if (!req_mgr_h->workq) {
		dev_err(dev, "Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	dev_dbg(dev, "Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
			sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	cc_req_mgr_fini(drvdata);
	return rc;
}

static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{
	int i, w;
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	struct device *dev = drvdata_to_dev(drvdata);

	/*
	 * We do indeed write all 6 command words to the same
	 * register. The HW supports this.
	 */

	for (i = 0; i < seq_len; i++) {
		for (w = 0; w <= 5; w++)
			writel_relaxed(seq[i].word[w], reg);

		if (cc_dump_desc)
			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
				i, seq[i].word[0], seq[i].word[1],
				seq[i].word[2], seq[i].word[3],
				seq[i].word[4], seq[i].word[5]);
	}
}

/**
 * request_mgr_complete() - Completion will take place if and only if user
 * requested completion by cc_send_sync_request().
 *
 * @dev: Device pointer
 * @dx_compl_h: The completion event to signal
 * @dummy: unused error code
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}
static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{
	unsigned long poll_queue;
	struct device *dev = drvdata_to_dev(drvdata);

	/* The SW queue is checked only once, since it cannot change
	 * during the poll: the caller holds the hw_lock (spin_lock_bh)
	 * for the duration of this call.
	 */
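	/*
	 * Full condition for a power-of-two ring buffer: advancing head by
	 * one would collide with tail. One slot is deliberately left unused
	 * so that "full" can be told apart from "empty" (head == tail).
	 */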
	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
	    req_mgr_h->req_queue_tail) {
		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -ENOSPC;
	}

	if (req_mgr_h->q_free_slots >= total_seq_len)
		return 0;

	/* Wait for space in HW queue. Poll a constant number of iterations. */
	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;

		if (req_mgr_h->q_free_slots >= total_seq_len) {
			/* If there is enough space, return */
			return 0;
		}

		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue; try again later */
	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
		req_mgr_h->q_free_slots, total_seq_len);
	return -ENOSPC;
}

/**
 * cc_do_send_request() - Enqueue caller request to crypto hardware.
 * Must be called with the HW lock held and PM running.
 *
 * @drvdata: Associated device driver context
 * @cc_req: The request to enqueue
 * @desc: The crypto sequence
 * @len: The crypto sequence length
 * @add_comp: If "true": add an artificial dout DMA to mark completion
 */
static void cc_do_send_request(struct cc_drvdata *drvdata,
			       struct cc_crypto_req *cc_req,
			       struct cc_hw_desc *desc, unsigned int len,
			       bool add_comp)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int total_seq_len = len; /* initial sequence length */
	struct device *dev = drvdata_to_dev(drvdata);

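	/*
	 * Track the high-water mark of SW queue occupancy; the power-of-two
	 * mask makes the subtraction correct even after head wraps past tail.
	 */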
	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);

	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();

	/* STAT_PHASE_4: Push sequence */

	enqueue_seq(drvdata, desc, len);

	if (add_comp) {
		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
		total_seq_len++;
	}

	if (req_mgr_h->q_free_slots < total_seq_len) {
		/* This situation should never occur. It may indicate a
		 * problem with resuming power. Set the free slot count to
		 * 0 and hope for the best.
		 */
		dev_err(dev, "HW free slot count mismatch.");
		req_mgr_h->q_free_slots = 0;
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}
}

static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	spin_lock_bh(&mgr->bl_lock);
	list_add_tail(&bli->list, &mgr->backlog);
	++mgr->bl_len;
	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
	spin_unlock_bh(&mgr->bl_lock);
	tasklet_schedule(&mgr->comptask);
}

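/*
 * Drain the backlog in FIFO order from the completion handler: each entry
 * is notified once with -EINPROGRESS (per the crypto API backlog contract)
 * and is resubmitted to the HW queue as soon as enough slots are free.
 */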
static void cc_proc_backlog(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli;
	struct cc_crypto_req *creq;
	void *req;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	spin_lock(&mgr->bl_lock);

	while (mgr->bl_len) {
		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);

		spin_unlock(&mgr->bl_lock);

		creq = &bli->creq;
		req = creq->user_arg;

		/*
		 * Notify the request we're moving out of the backlog
		 * but only if we haven't done so already.
		 */
		if (!bli->notif) {
			creq->user_cb(dev, req, -EINPROGRESS);
			bli->notif = true;
		}

		spin_lock(&mgr->hw_lock);

		rc = cc_queues_status(drvdata, mgr, bli->len);
		if (rc) {
			/*
			 * There is still no room in the FIFO for
			 * this request. Bail out. We'll return here
			 * on the next completion irq.
			 */
			spin_unlock(&mgr->hw_lock);
			return;
		}

		cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
				   false);
		spin_unlock(&mgr->hw_lock);

		/* Remove ourselves from the backlog list */
		spin_lock(&mgr->bl_lock);
		list_del(&bli->list);
		--mgr->bl_len;
		kfree(bli);
	}

	spin_unlock(&mgr->bl_lock);
}

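/*
 * cc_send_request() - Enqueue an asynchronous crypto request.
 *
 * Returns -EINPROGRESS once the request is queued to the HW, -EBUSY if it
 * was parked on the backlog (only possible when the caller set
 * CRYPTO_TFM_REQ_MAY_BACKLOG), or a negative error code on failure.
 */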
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{
	int rc;
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
	gfp_t flags = cc_gfp_flags(req);
	struct cc_bl_item *bli;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, len);

#ifdef CC_DEBUG_FORCE_BACKLOG
	if (backlog_ok)
		rc = -ENOSPC;
#endif /* CC_DEBUG_FORCE_BACKLOG */

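	/*
	 * Out of queue space but the caller can tolerate backlogging:
	 * snapshot the request and its descriptors into a cc_bl_item, park
	 * it on the backlog, and keep the PM reference until the request is
	 * eventually pushed to the HW and completed.
	 */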
	if (rc == -ENOSPC && backlog_ok) {
		spin_unlock_bh(&mgr->hw_lock);

		bli = kmalloc(sizeof(*bli), flags);
		if (!bli) {
			cc_pm_put_suspend(dev);
			return -ENOMEM;
		}

		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
		memcpy(&bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		bli->notif = false;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}

	if (!rc) {
		cc_do_send_request(drvdata, cc_req, desc, len, false);
		rc = -EINPROGRESS;
	}

	spin_unlock_bh(&mgr->hw_lock);
	return rc;
}

int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{
	int rc;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	init_completion(&cc_req->seq_compl);
	cc_req->user_cb = request_mgr_complete;
	cc_req->user_arg = &cc_req->seq_compl;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	while (true) {
		spin_lock_bh(&mgr->hw_lock);
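		/*
		 * Ask for len + 1 slots: cc_do_send_request() below appends
		 * the dummy completion descriptor to this sequence.
		 */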
		rc = cc_queues_status(drvdata, mgr, len + 1);

		if (!rc)
			break;

		spin_unlock_bh(&mgr->hw_lock);
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	cc_do_send_request(drvdata, cc_req, desc, len, true);
	spin_unlock_bh(&mgr->hw_lock);
	wait_for_completion(&cc_req->seq_compl);
	return 0;
}

/**
 * send_request_init() - Enqueue caller request to crypto hardware during init
 * process.
 * Assumes this function is not called in the middle of a flow,
 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 *
 * @drvdata: Associated device driver context
 * @desc: The crypto sequence
 * @len: The crypto sequence length
 *
 * Return:
 * 0 upon success
 */
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /* initial sequence length */
	int rc = 0;

	/* Wait for space in the HW and SW queues. Polls a bounded number
	 * of iterations (CC_MAX_POLL_ITER).
	 */
	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
	if (rc)
		return rc;

	set_queue_last_ind(drvdata, &desc[(len - 1)]);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();
	enqueue_seq(drvdata, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots =
		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));

	return 0;
}

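/*
 * Called from the interrupt path when a completion is signaled: wake any
 * synchronous waiter in cc_send_sync_request() and kick the deferred
 * completion handler (workqueue or tasklet, depending on COMP_IN_WQ).
 */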
void complete_request(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	complete(&drvdata->hw_queue_avail);
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata =
		container_of(work, struct cc_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif

static void proc_completions(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req *cc_req;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	unsigned int *tail = &request_mgr_handle->req_queue_tail;
	unsigned int *head = &request_mgr_handle->req_queue_head;
	int rc;
	u32 mask;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (*head == *tail) {
			/* We are supposed to handle a completion but our
			 * queue is empty. This is not normal. Return and
			 * hope for the best.
			 */
			dev_err(dev, "Request queue is empty head == tail %u\n",
				*head);
			break;
		}

		cc_req = &request_mgr_handle->req_queue[*tail];

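		/*
		 * For a CPP request, check whether the HW raised the
		 * per-slot "operation aborted" interrupt; if so, fail
		 * the request with -EPERM.
		 */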
		if (cc_req->cpp.is_cpp) {
			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
				cc_req->cpp.slot, cc_req->cpp.alg);
			mask = cc_cpp_int_mask(cc_req->cpp.alg,
					       cc_req->cpp.slot);
			rc = (drvdata->irq & mask ? -EPERM : 0);
			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
				drvdata->irq, rc);
		} else {
			dev_dbg(dev, "Non-CPP request completion\n");
			rc = 0;
		}

		if (cc_req->user_cb)
			cc_req->user_cb(dev, cc_req->user_arg, rc);
		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
		dev_dbg(dev, "Request completed. axi_completed=%d\n",
			request_mgr_handle->axi_completed);
		cc_pm_put_suspend(dev);
	}
}

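/* Read how many AXI transactions have completed from the AXIM monitor */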
static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 cc_ioread(drvdata, drvdata->axim_mon_offset));
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irq;

	dev_dbg(dev, "Completion handler called!\n");
	irq = (drvdata->irq & drvdata->comp_mask);

	/* To avoid the interrupt firing again as we unmask it,
	 * clear it now
	 */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

	/* Avoid race with above clear: Test completion counter once more */

	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);

	dev_dbg(dev, "AXI completion after updated: %d\n",
		request_mgr_handle->axi_completed);

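	/*
	 * Keep draining: new completions may arrive while earlier ones are
	 * being processed, so re-read IRR and the AXI counter until both
	 * are quiescent before clearing and unmasking the interrupt.
	 */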
	while (request_mgr_handle->axi_completed) {
		do {
			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
			irq = (drvdata->irq & drvdata->comp_mask);
			proc_completions(drvdata);

			/* At this point (after proc_completions()),
			 * request_mgr_handle->axi_completed is 0.
			 */
			request_mgr_handle->axi_completed +=
						cc_axi_comp_count(drvdata);
		} while (request_mgr_handle->axi_completed > 0);

		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
	}

	/* after verifying that there is nothing to do,
	 * unmask AXI completion interrupt
	 */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);

	cc_proc_backlog(drvdata);
	dev_dbg(dev, "Comp. handler done.\n");
}
663 | |