// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <linux/atomic.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

#define MAX_CCPS 32

/* Limit CCP use to a specified number of queues per device */
static unsigned int nqueues;
module_param(nqueues, uint, 0444);
MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)");

/* Limit the maximum number of configured CCPs */
static atomic_t dev_count = ATOMIC_INIT(0);
static unsigned int max_devs = MAX_CCPS;
module_param(max_devs, uint, 0444);
MODULE_PARM_DESC(max_devs, "Maximum number of CCPs to enable (default: all; 0 disables all CCPs)");
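
/*
 * Usage note (illustrative, not driver code): nqueues and max_devs are
 * read-only (0444) module parameters, so they are set at module load or
 * boot time rather than at runtime. Assuming this file is built into the
 * ccp.ko module, the usual forms would be something like:
 *
 *	modprobe ccp nqueues=2 max_devs=1
 *
 * or "ccp.nqueues=2 ccp.max_devs=1" on the kernel command line for a
 * built-in driver. The module name here is an assumption; confirm it with
 * modinfo on the target system.
 */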

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

/* Human-readable error strings */
#define CCP_MAX_ERROR_CODE 64
static char *ccp_error_codes[] = {
	"",
	"ILLEGAL_ENGINE",
	"ILLEGAL_KEY_ID",
	"ILLEGAL_FUNCTION_TYPE",
	"ILLEGAL_FUNCTION_MODE",
	"ILLEGAL_FUNCTION_ENCRYPT",
	"ILLEGAL_FUNCTION_SIZE",
	"Zlib_MISSING_INIT_EOM",
	"ILLEGAL_FUNCTION_RSVD",
	"ILLEGAL_BUFFER_LENGTH",
	"VLSB_FAULT",
	"ILLEGAL_MEM_ADDR",
	"ILLEGAL_MEM_SEL",
	"ILLEGAL_CONTEXT_ID",
	"ILLEGAL_KEY_ADDR",
	"0xF Reserved",
	"Zlib_ILLEGAL_MULTI_QUEUE",
	"Zlib_ILLEGAL_JOBID_CHANGE",
	"CMD_TIMEOUT",
	"IDMA0_AXI_SLVERR",
	"IDMA0_AXI_DECERR",
	"0x15 Reserved",
	"IDMA1_AXI_SLAVE_FAULT",
	"IDMA1_AIXI_DECERR",
	"0x18 Reserved",
	"ZLIBVHB_AXI_SLVERR",
	"ZLIBVHB_AXI_DECERR",
	"0x1B Reserved",
	"ZLIB_UNEXPECTED_EOM",
	"ZLIB_EXTRA_DATA",
	"ZLIB_BTYPE",
	"ZLIB_UNDEFINED_SYMBOL",
	"ZLIB_UNDEFINED_DISTANCE_S",
	"ZLIB_CODE_LENGTH_SYMBOL",
	"ZLIB _VHB_ILLEGAL_FETCH",
	"ZLIB_UNCOMPRESSED_LEN",
	"ZLIB_LIMIT_REACHED",
	"ZLIB_CHECKSUM_MISMATCH0",
	"ODMA0_AXI_SLVERR",
	"ODMA0_AXI_DECERR",
	"0x28 Reserved",
	"ODMA1_AXI_SLVERR",
	"ODMA1_AXI_DECERR",
};

void ccp_log_error(struct ccp_device *d, unsigned int e)
{
	if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
		return;

	if (e < ARRAY_SIZE(ccp_error_codes))
		dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
	else
		dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
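
/*
 * A minimal sketch of the ordering described above (it mirrors what
 * ccp_get_device() does below): ccp_unit_lock is taken for reading first,
 * and only then is ccp_rr_lock nested inside it:
 *
 *	unsigned long flags;
 *
 *	read_lock_irqsave(&ccp_unit_lock, flags);
 *	if (!list_empty(&ccp_units)) {
 *		spin_lock(&ccp_rr_lock);
 *		// ... advance the ccp_rr round-robin pointer ...
 *		spin_unlock(&ccp_rr_lock);
 *	}
 *	read_unlock_irqrestore(&ccp_unit_lock, flags);
 */
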
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

int ccp_register_rng(struct ccp_device *ccp)
{
	int ret = 0;

	dev_dbg(ccp->dev, "Registering RNG...\n");
	/* Register an RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret)
		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

	return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
	if (ccp->hwrng.name)
		hwrng_unregister(&ccp->hwrng);
}

static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list;
 * zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set, and that
 * will result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp;
	unsigned long flags;
	unsigned int i;
	int ret;

	/* Some commands might need to be sent to a specific device */
	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
			ret = -EBUSY;
			list_add_tail(&cmd->entry, &ccp->backlog);
		} else {
			ret = -ENOSPC;
		}
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
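
/*
 * Illustrative caller-side sketch of the contract documented above. This is
 * not part of the driver: the request structure (a caller-owned completion
 * plus status field) and the engine-specific setup of cmd.u are assumptions
 * and are omitted here.
 *
 *	static void my_ccp_callback(void *data, int err)
 *	{
 *		struct my_request *req = data;
 *
 *		if (err == -EINPROGRESS)	// advanced out of the backlog
 *			return;
 *		req->status = err;		// final result of the operation
 *		complete(&req->done);
 *	}
 *
 *	...
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_PASSTHRU;	// engine-specific fields omitted
 *	cmd.flags = CCP_CMD_MAY_BACKLOG;
 *	cmd.callback = my_ccp_callback;
 *	cmd.data = req;
 *
 *	ret = ccp_enqueue_cmd(&cmd);
 *	if (ret != -EINPROGRESS && ret != -EBUSY)
 *		return ret;			// not queued
 *	wait_for_completion(&req->done);	// callback reports the result
 */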

static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);

	complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @sp: sp_device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;
	ccp->sp = sp;
	ccp->axcache = sp->axcache;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	/* Initialize the wait queues */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);

	return ccp;
}

int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}

void ccp_dev_suspend(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	/* If there's no device there's nothing to do */
	if (!ccp)
		return;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));
}

void ccp_dev_resume(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	/* If there's no device there's nothing to do */
	if (!ccp)
		return;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
}

int ccp_dev_init(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;
	int ret;

	/*
	 * Check how many we have so far, and stop after reaching
	 * that number
	 */
	if (atomic_inc_return(&dev_count) > max_devs)
		return 0; /* don't fail the load */

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(sp);
	if (!ccp)
		goto e_err;
	sp->ccp_data = ccp;

	if (!nqueues || (nqueues > MAX_HW_QUEUES))
		ccp->max_q_count = MAX_HW_QUEUES;
	else
		ccp->max_q_count = nqueues;

	ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ccp->use_tasklet = sp->use_tasklet;

	ccp->io_regs = sp->io_map + ccp->vdata->offset;
	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret) {
		/* A positive number means that the device cannot be initialized,
		 * but no additional message is required.
		 */
		if (ret > 0)
			goto e_quiet;

		/* An unexpected problem occurred, and should be reported in the log */
		goto e_err;
	}

	dev_notice(dev, "ccp enabled\n");

	return 0;

e_err:
	dev_notice(dev, "ccp initialization failed\n");

e_quiet:
	sp->ccp_data = NULL;

	return ret;
}

void ccp_dev_destroy(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);
}