1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (C) 2019-2020 ARM Limited or its affiliates. */ |
3 | |
4 | #include <linux/kernel.h> |
5 | #include <linux/module.h> |
6 | #include <linux/clk.h> |
7 | #include <linux/hw_random.h> |
8 | #include <linux/io.h> |
9 | #include <linux/platform_device.h> |
10 | #include <linux/pm_runtime.h> |
11 | #include <linux/interrupt.h> |
12 | #include <linux/irqreturn.h> |
13 | #include <linux/workqueue.h> |
14 | #include <linux/circ_buf.h> |
15 | #include <linux/completion.h> |
16 | #include <linux/of.h> |
17 | #include <linux/bitfield.h> |
18 | #include <linux/fips.h> |
19 | |
20 | #include "cctrng.h" |
21 | |
/* Helpers to extract a named bit-field from a CC register value.
 * Each field is described by <name>_BIT_SHIFT / <name>_BIT_SIZE constants
 * (from cctrng.h); CC_GENMASK() builds the matching bit mask.
 */
#define CC_REG_LOW(name)  (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name)  GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

/* Extract field <fld_name> of register <reg_name> from raw value reg_val */
#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \
	(FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))

/* max polls of NVM_IS_IDLE while waiting for HW reset completion */
#define CC_HW_RESET_LOOP_COUNT 10
/* runtime-PM autosuspend delay, in milliseconds */
#define CC_TRNG_SUSPEND_TIMEOUT 3000

/* data circular buffer in words must be:
 * - of a power-of-2 size (limitation of circ_buf.h macros)
 * - at least 6, the size generated in the EHR according to HW implementation
 */
#define CCTRNG_DATA_BUF_WORDS 32

/* The timeout for the TRNG operation should be calculated with the formula:
 * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
 * while:
 * - SAMPLE_CNT is input value from the characterisation process
 * - all the rest are constants
 */
#define EHR_NUM 1
#define VN_COEFF 4
#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
#define SCALE_VALUE 2
#define CCTRNG_TIMEOUT(smpl_cnt) \
	(EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
50 | |
/* Per-device driver state */
struct cctrng_drvdata {
	struct platform_device *pdev;	/* owning platform device */
	void __iomem *cc_base;		/* mapped CryptoCell register block */
	struct clk *clk;		/* optional device clock */
	struct hwrng rng;		/* hw_random framework handle */
	u32 active_rosc;		/* index of ring oscillator in use */
	/* Sampling interval for each ring oscillator:
	 * count of ring oscillator cycles between consecutive bits sampling.
	 * Value of 0 indicates non-valid rosc
	 */
	u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];

	u32 data_buf[CCTRNG_DATA_BUF_WORDS];	/* entropy storage */
	struct circ_buf circ;		/* circular view over data_buf */
	struct work_struct compwork;	/* drain EHR words into data_buf */
	struct work_struct startwork;	/* kick off HW entropy collection */

	/* pending_hw - 1 when HW is pending, 0 when it is idle */
	atomic_t pending_hw;

	/* protects against multiple concurrent consumers of data_buf */
	spinlock_t read_lock;
};
74 | |
75 | |
76 | /* functions for write/read CC registers */ |
77 | static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val) |
78 | { |
79 | iowrite32(val, (drvdata->cc_base + reg)); |
80 | } |
81 | static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg) |
82 | { |
83 | return ioread32(drvdata->cc_base + reg); |
84 | } |
85 | |
86 | |
/* Take a runtime-PM reference and resume the device.
 * Returns 0 on success, negative errno on failure.
 *
 * Fix: pm_runtime_get_sync() increments the usage counter even when it
 * fails, so the counter must be dropped on the error path; otherwise the
 * device can never autosuspend again.
 */
static int cc_trng_pm_get(struct device *dev)
{
	int rc = pm_runtime_get_sync(dev);

	if (rc < 0) {
		/* balance the usage count bumped by pm_runtime_get_sync() */
		pm_runtime_put_noidle(dev);
		return rc;
	}

	/* pm_runtime_get_sync() can return 1 as a valid return code */
	return 0;
}
96 | |
/* Drop our runtime-PM reference and allow the device to autosuspend */
static void cc_trng_pm_put_suspend(struct device *dev)
{
	int rc;

	pm_runtime_mark_last_busy(dev);
	rc = pm_runtime_put_autosuspend(dev);
	if (rc)
		dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
}
106 | |
107 | static int cc_trng_pm_init(struct cctrng_drvdata *drvdata) |
108 | { |
109 | struct device *dev = &(drvdata->pdev->dev); |
110 | |
111 | /* must be before the enabling to avoid redundant suspending */ |
112 | pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT); |
113 | pm_runtime_use_autosuspend(dev); |
114 | /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */ |
115 | return pm_runtime_set_active(dev); |
116 | } |
117 | |
118 | static void cc_trng_pm_go(struct cctrng_drvdata *drvdata) |
119 | { |
120 | struct device *dev = &(drvdata->pdev->dev); |
121 | |
122 | /* enable the PM module*/ |
123 | pm_runtime_enable(dev); |
124 | } |
125 | |
126 | static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata) |
127 | { |
128 | struct device *dev = &(drvdata->pdev->dev); |
129 | |
130 | pm_runtime_disable(dev); |
131 | } |
132 | |
133 | |
134 | static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata) |
135 | { |
136 | struct device *dev = &(drvdata->pdev->dev); |
137 | struct device_node *np = drvdata->pdev->dev.of_node; |
138 | int rc; |
139 | int i; |
140 | /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */ |
141 | int ret = -EINVAL; |
142 | |
143 | rc = of_property_read_u32_array(np, propname: "arm,rosc-ratio" , |
144 | out_values: drvdata->smpl_ratio, |
145 | CC_TRNG_NUM_OF_ROSCS); |
146 | if (rc) { |
147 | /* arm,rosc-ratio was not found in device tree */ |
148 | return rc; |
149 | } |
150 | |
151 | /* verify that at least one rosc has (sampling ratio > 0) */ |
152 | for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) { |
153 | dev_dbg(dev, "rosc %d sampling ratio %u" , |
154 | i, drvdata->smpl_ratio[i]); |
155 | |
156 | if (drvdata->smpl_ratio[i] > 0) |
157 | ret = 0; |
158 | } |
159 | |
160 | return ret; |
161 | } |
162 | |
163 | static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata) |
164 | { |
165 | struct device *dev = &(drvdata->pdev->dev); |
166 | |
167 | dev_dbg(dev, "cctrng change rosc (was %d)\n" , drvdata->active_rosc); |
168 | drvdata->active_rosc += 1; |
169 | |
170 | while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) { |
171 | if (drvdata->smpl_ratio[drvdata->active_rosc] > 0) |
172 | return 0; |
173 | |
174 | drvdata->active_rosc += 1; |
175 | } |
176 | return -EINVAL; |
177 | } |
178 | |
179 | |
180 | static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata) |
181 | { |
182 | u32 max_cycles; |
183 | |
184 | /* Set watchdog threshold to maximal allowed time (in CPU cycles) */ |
185 | max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]); |
186 | cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, val: max_cycles); |
187 | |
188 | /* enable the RND source */ |
189 | cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, val: 0x1); |
190 | |
191 | /* unmask RNG interrupts */ |
192 | cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, val: (u32)~CC_RNG_INT_MASK); |
193 | } |
194 | |
195 | |
196 | /* increase circular data buffer index (head/tail) */ |
197 | static inline void circ_idx_inc(int *idx, int bytes) |
198 | { |
199 | *idx += (bytes + 3) >> 2; |
200 | *idx &= (CCTRNG_DATA_BUF_WORDS - 1); |
201 | } |
202 | |
203 | static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata) |
204 | { |
205 | return CIRC_SPACE(drvdata->circ.head, |
206 | drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); |
207 | |
208 | } |
209 | |
210 | static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait) |
211 | { |
212 | /* current implementation ignores "wait" */ |
213 | |
214 | struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv; |
215 | struct device *dev = &(drvdata->pdev->dev); |
216 | u32 *buf = (u32 *)drvdata->circ.buf; |
217 | size_t copied = 0; |
218 | size_t cnt_w; |
219 | size_t size; |
220 | size_t left; |
221 | |
222 | if (!spin_trylock(lock: &drvdata->read_lock)) { |
223 | /* concurrent consumers from data_buf cannot be served */ |
224 | dev_dbg_ratelimited(dev, "unable to hold lock\n" ); |
225 | return 0; |
226 | } |
227 | |
228 | /* copy till end of data buffer (without wrap back) */ |
229 | cnt_w = CIRC_CNT_TO_END(drvdata->circ.head, |
230 | drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); |
231 | size = min((cnt_w<<2), max); |
232 | memcpy(data, &(buf[drvdata->circ.tail]), size); |
233 | copied = size; |
234 | circ_idx_inc(idx: &drvdata->circ.tail, bytes: size); |
235 | /* copy rest of data in data buffer */ |
236 | left = max - copied; |
237 | if (left > 0) { |
238 | cnt_w = CIRC_CNT(drvdata->circ.head, |
239 | drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); |
240 | size = min((cnt_w<<2), left); |
241 | memcpy(data, &(buf[drvdata->circ.tail]), size); |
242 | copied += size; |
243 | circ_idx_inc(idx: &drvdata->circ.tail, bytes: size); |
244 | } |
245 | |
246 | spin_unlock(lock: &drvdata->read_lock); |
247 | |
248 | if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { |
249 | if (atomic_cmpxchg(v: &drvdata->pending_hw, old: 0, new: 1) == 0) { |
250 | /* re-check space in buffer to avoid potential race */ |
251 | if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { |
252 | /* increment device's usage counter */ |
253 | int rc = cc_trng_pm_get(dev); |
254 | |
255 | if (rc) { |
256 | dev_err(dev, |
257 | "cc_trng_pm_get returned %x\n" , |
258 | rc); |
259 | return rc; |
260 | } |
261 | |
262 | /* schedule execution of deferred work handler |
263 | * for filling of data buffer |
264 | */ |
265 | schedule_work(work: &drvdata->startwork); |
266 | } else { |
267 | atomic_set(v: &drvdata->pending_hw, i: 0); |
268 | } |
269 | } |
270 | } |
271 | |
272 | return copied; |
273 | } |
274 | |
/* (Re)configure the TRNG HW for the currently active rosc and start it:
 * SW reset, program the sampling ratio and rosc selection, then enable
 * the entropy source via cc_trng_enable_rnd_source().
 */
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
{
	u32 tmp_smpl_cnt = 0;
	struct device *dev = &(drvdata->pdev->dev);

	dev_dbg(dev, "cctrng hw trigger.\n");

	/* enable the HW RND clock */
	cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

	/* do software reset */
	cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
	/* in order to verify that the reset has completed,
	 * the sample count need to be verified
	 */
	do {
		/* enable the HW RND clock */
		cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

		/* set sampling ratio (rng_clocks) between consecutive bits */
		cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
			   drvdata->smpl_ratio[drvdata->active_rosc]);

		/* read the sampling ratio */
		tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);

	} while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);
	/* NOTE(review): the poll above is unbounded; it assumes the HW reset
	 * always completes — confirm a bounded retry is not needed.
	 */

	/* disable the RND source for setting new parameters in HW */
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* clear all pending RNG interrupts before reconfiguration */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);

	/* select the active ring oscillator */
	cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);

	/* Debug Control register: set to 0 - no bypasses */
	cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);

	cc_trng_enable_rnd_source(drvdata);
}
315 | |
/* Deferred work run after an RNG interrupt: check for HW errors, copy
 * the EHR words into the circular buffer, and either restart collection
 * (more room in the buffer, or error recovery on the next rosc) or
 * release the HW and drop the runtime-PM reference.
 */
static void cc_trng_compwork_handler(struct work_struct *w)
{
	u32 isr = 0;
	u32 ehr_valid = 0;
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, compwork);
	struct device *dev = &(drvdata->pdev->dev);
	int i;

	/* stop DMA and the RNG source */
	cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* read RNG_ISR and check for errors */
	isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
	ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
	dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);

	if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
		fips_fail_notify();
		/* FIPS error is fatal */
		panic("Got HW CRNGT error while fips is enabled!\n");
	}

	/* Clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);


	if (!ehr_valid) {
		/* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
		if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
				CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
			dev_dbg(dev, "cctrng autocorr/timeout error.\n");
			goto next_rosc;
		}

		/* in case of VN error, ignore it */
	}

	/* read EHR data from registers */
	for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
		/* calc word ptr in data_buf */
		u32 *buf = (u32 *)drvdata->circ.buf;

		buf[drvdata->circ.head] = cc_ioread(drvdata,
				CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));

		/* EHR_DATA registers are cleared on read. In case 0 value was
		 * returned, restart the entropy collection.
		 */
		if (buf[drvdata->circ.head] == 0) {
			dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
				drvdata->active_rosc);
			goto next_rosc;
		}

		circ_idx_inc(&drvdata->circ.head, 1<<2);
	}

	/* HW is idle again; readers may now claim it */
	atomic_set(&drvdata->pending_hw, 0);

	/* continue to fill data buffer if needed */
	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
			/* Re-enable rnd source */
			cc_trng_enable_rnd_source(drvdata);
			return;
		}
	}

	/* no refill scheduled: drop the PM reference taken in cctrng_read() */
	cc_trng_pm_put_suspend(dev);

	dev_dbg(dev, "compwork handler done\n");
	return;

next_rosc:
	if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
			(cc_trng_change_rosc(drvdata) == 0)) {
		/* trigger trng hw with next rosc */
		cc_trng_hw_trigger(drvdata);
	} else {
		/* no usable rosc left (or buffer full): release HW and PM */
		atomic_set(&drvdata->pending_hw, 0);
		cc_trng_pm_put_suspend(dev);
	}
}
401 | |
/* Interrupt handler (IRQF_SHARED): acknowledge host-level interrupts,
 * mask RNG interrupts, and defer EHR processing to compwork.
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
	struct device *dev = &(drvdata->pdev->dev);
	u32 irr;

	/* if driver suspended return, probably shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);

	/* RNG interrupt - most probable */
	if (irr & CC_HOST_RNG_IRQ_MASK) {
		/* Mask RNG interrupts - will be unmasked in deferred work */
		cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);

		/* We clear RNG interrupt here,
		 * to avoid it from firing as we'll unmask RNG interrupts.
		 */
		cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
			   CC_HOST_RNG_IRQ_MASK);

		irr &= ~CC_HOST_RNG_IRQ_MASK;

		/* schedule execution of deferred work handler */
		schedule_work(&drvdata->compwork);
	}

	if (irr) {
		dev_dbg_ratelimited(dev,
				"IRR includes unknown cause bits (0x%08X)\n",
				irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
448 | |
/* Deferred work scheduled from cctrng_read() once buffer space exists
 * and HW ownership (pending_hw) has been claimed: restart entropy
 * collection from the first ring oscillator.
 */
static void cc_trng_startwork_handler(struct work_struct *w)
{
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, startwork);

	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);
}
457 | |
/* Platform driver probe: map registers, parse DT sampling ratios, set up
 * the clock/IRQ/workqueues, register with the hw_random core and kick
 * off the first entropy collection.  Most resources are device-managed.
 */
static int cctrng_probe(struct platform_device *pdev)
{
	struct cctrng_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int rc = 0;
	u32 val;
	int irq;

	/* Compile time assertion checks */
	BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
	BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!drvdata->rng.name)
		return -ENOMEM;

	drvdata->rng.read = cctrng_read;
	drvdata->rng.priv = (unsigned long)drvdata;
	drvdata->rng.quality = CC_TRNG_QUALITY;

	platform_set_drvdata(pdev, drvdata);
	drvdata->pdev = pdev;

	/* circ buffer stores entropy words in the embedded data_buf */
	drvdata->circ.buf = (char *)drvdata->data_buf;

	drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(drvdata->cc_base))
		return dev_err_probe(dev, PTR_ERR(drvdata->cc_base), "Failed to ioremap registers");

	/* Then IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* parse sampling rate from device tree */
	rc = cc_trng_parse_sampling_ratio(drvdata);
	if (rc)
		return dev_err_probe(dev, rc, "Failed to get legal sampling ratio for rosc\n");

	drvdata->clk = devm_clk_get_optional_enabled(dev, NULL);
	if (IS_ERR(drvdata->clk))
		return dev_err_probe(dev, PTR_ERR(drvdata->clk),
				     "Failed to get or enable the clock\n");

	INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
	INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
	spin_lock_init(&drvdata->read_lock);

	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
	if (rc)
		return dev_err_probe(dev, rc, "Could not register to interrupt %d\n", irq);
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	/* init PM */
	rc = cc_trng_pm_init(drvdata);
	if (rc)
		return dev_err_probe(dev, rc, "cc_trng_pm_init failed\n");

	/* increment device's usage counter */
	rc = cc_trng_pm_get(dev);
	if (rc)
		return dev_err_probe(dev, rc, "cc_trng_pm_get returned %x\n", rc);

	/* set pending_hw to verify that HW won't be triggered from read */
	atomic_set(&drvdata->pending_hw, 1);

	/* registration of the hwrng device */
	rc = devm_hwrng_register(dev, &drvdata->rng);
	if (rc) {
		dev_err(dev, "Could not register hwrng device.\n");
		goto post_pm_err;
	}

	/* trigger HW to start generate data */
	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);

	/* All set, we can allow auto-suspend */
	cc_trng_pm_go(drvdata);

	dev_info(dev, "ARM cctrng device initialized\n");

	return 0;

post_pm_err:
	/* undo cc_trng_pm_init(); devm handles everything else */
	cc_trng_pm_fini(drvdata);

	return rc;
}
562 | |
/* Platform driver remove: disable runtime PM.  All other resources
 * (IRQ, clock, mapping, hwrng registration) are device-managed and
 * released by the driver core.
 */
static void cctrng_remove(struct platform_device *pdev)
{
	struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	dev_dbg(dev, "Releasing cctrng resources...\n");

	cc_trng_pm_fini(drvdata);

	dev_info(dev, "ARM cctrng device terminated\n");
}
574 | |
/* Suspend callback (system sleep and runtime PM): enable CC power-down
 * mode, then gate the device clock.
 */
static int __maybe_unused cctrng_suspend(struct device *dev)
{
	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);

	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
		   POWER_DOWN_ENABLE);

	clk_disable_unprepare(drvdata->clk);

	return 0;
}
587 | |
588 | static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata) |
589 | { |
590 | unsigned int val; |
591 | unsigned int i; |
592 | |
593 | for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) { |
594 | /* in cc7x3 NVM_IS_IDLE indicates that CC reset is |
595 | * completed and device is fully functional |
596 | */ |
597 | val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET); |
598 | if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) { |
599 | /* hw indicate reset completed */ |
600 | return true; |
601 | } |
602 | /* allow scheduling other process on the processor */ |
603 | schedule(); |
604 | } |
605 | /* reset not completed */ |
606 | return false; |
607 | } |
608 | |
/* Resume callback (system sleep and runtime PM): re-enable the clock,
 * wait for the CryptoCell HW reset to finish, then restore interrupt
 * masking and leave power-down mode.
 */
static int __maybe_unused cctrng_resume(struct device *dev)
{
	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
	int rc;

	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
	/* Enables the device source clk */
	rc = clk_prepare_enable(drvdata->clk);
	if (rc) {
		dev_err(dev, "failed getting clock back on. We're toast.\n");
		return rc;
	}

	/* wait for Cryptocell reset completion */
	if (!cctrng_wait_for_reset_completion(drvdata)) {
		dev_err(dev, "Cryptocell reset not completed");
		return -EBUSY;
	}

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	/* leave power-down mode */
	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
		   POWER_DOWN_DISABLE);

	return 0;
}
638 | |
/* Same suspend/resume callbacks for system sleep and runtime PM */
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);

/* DT compatibles: CryptoCell 713 and 703 TRNG blocks */
static const struct of_device_id arm_cctrng_dt_match[] = {
	{ .compatible = "arm,cryptocell-713-trng", },
	{ .compatible = "arm,cryptocell-703-trng", },
	{},
};
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);

static struct platform_driver cctrng_driver = {
	.driver = {
		.name = "cctrng",
		.of_match_table = arm_cctrng_dt_match,
		.pm = &cctrng_pm,
	},
	.probe = cctrng_probe,
	.remove_new = cctrng_remove,
};

module_platform_driver(cctrng_driver);

/* Module description */
MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");
664 | |