// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include "gdsc.h"

#define PWR_ON_MASK             BIT(31)
#define EN_REST_WAIT_MASK       GENMASK_ULL(23, 20)
#define EN_FEW_WAIT_MASK        GENMASK_ULL(19, 16)
#define CLK_DIS_WAIT_MASK       GENMASK_ULL(15, 12)
#define SW_OVERRIDE_MASK        BIT(2)
#define HW_CONTROL_MASK         BIT(1)
#define SW_COLLAPSE_MASK        BIT(0)
#define GMEM_CLAMP_IO_MASK      BIT(0)
#define GMEM_RESET_MASK         BIT(4)

/* CFG_GDSCR */
#define GDSC_POWER_UP_COMPLETE          BIT(16)
#define GDSC_POWER_DOWN_COMPLETE        BIT(15)
#define GDSC_RETAIN_FF_ENABLE           BIT(11)
#define CFG_GDSCR_OFFSET                0x4

/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
#define EN_REST_WAIT_VAL        0x2
#define EN_FEW_WAIT_VAL         0x8
#define CLK_DIS_WAIT_VAL        0x2

/* Transition delay shifts */
#define EN_REST_WAIT_SHIFT      20
#define EN_FEW_WAIT_SHIFT       16
#define CLK_DIS_WAIT_SHIFT      12

#define RETAIN_MEM      BIT(14)
#define RETAIN_PERIPH   BIT(13)

#define STATUS_POLL_TIMEOUT_US  1500
#define TIMEOUT_US              500

#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)

enum gdsc_status {
        GDSC_OFF,
        GDSC_ON
};

/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
        unsigned int reg;
        u32 val;
        int ret;

        if (sc->flags & POLL_CFG_GDSCR)
                reg = sc->gdscr + CFG_GDSCR_OFFSET;
        else if (sc->gds_hw_ctrl)
                reg = sc->gds_hw_ctrl;
        else
                reg = sc->gdscr;

        ret = regmap_read(sc->regmap, reg, &val);
        if (ret)
                return ret;

        if (sc->flags & POLL_CFG_GDSCR) {
                switch (status) {
                case GDSC_ON:
                        return !!(val & GDSC_POWER_UP_COMPLETE);
                case GDSC_OFF:
                        return !!(val & GDSC_POWER_DOWN_COMPLETE);
                }
        }

        switch (status) {
        case GDSC_ON:
                return !!(val & PWR_ON_MASK);
        case GDSC_OFF:
                return !(val & PWR_ON_MASK);
        }

        return -EINVAL;
}

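/*
 * Switch the GDSC between SW control and HW control ("HW trigger") mode.
 * In HW control mode the power state follows a hardware signal (e.g. from
 * the GMU) instead of the SW_COLLAPSE bit.
 */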
static int gdsc_hwctrl(struct gdsc *sc, bool en)
{
        u32 val = en ? HW_CONTROL_MASK : 0;

        return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
}

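/*
 * Busy-wait until the GDSC reports the requested state, re-checking one
 * last time after the deadline in case the poll loop was preempted past
 * it. Returns 0 on success, -ETIMEDOUT otherwise.
 */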
static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
{
        ktime_t start;

        start = ktime_get();
        do {
                if (gdsc_check_status(sc, status))
                        return 0;
        } while (ktime_us_delta(ktime_get(), start) < STATUS_POLL_TIMEOUT_US);

        if (gdsc_check_status(sc, status))
                return 0;

        return -ETIMEDOUT;
}

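/*
 * Set or clear the collapse vote: some GDSCs are collapsed through a
 * separate control register (collapse_ctrl/collapse_mask); all others use
 * the SW_COLLAPSE bit in their own GDSCR.
 */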
static int gdsc_update_collapse_bit(struct gdsc *sc, bool val)
{
        u32 reg, mask;
        int ret;

        if (sc->collapse_mask) {
                reg = sc->collapse_ctrl;
                mask = sc->collapse_mask;
        } else {
                reg = sc->gdscr;
                mask = SW_COLLAPSE_MASK;
        }

        ret = regmap_update_bits(sc->regmap, reg, mask, val ? mask : 0);
        if (ret)
                return ret;

        return 0;
}

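/*
 * Core power sequencing: enable the optional external supply, flip the
 * collapse vote, then (unless this is an unsynced vote removal on a
 * votable GDSC) poll until the hardware reports the requested state.
 */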
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status,
                             bool wait)
{
        int ret;

        if (status == GDSC_ON && sc->rsupply) {
                ret = regulator_enable(sc->rsupply);
                if (ret < 0)
                        return ret;
        }

        ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);

        /* If disabling votable gdscs, don't poll on status */
        if ((sc->flags & VOTABLE) && status == GDSC_OFF && !wait) {
                /*
                 * Add a short delay here to ensure that an enable
                 * right after it was disabled does not put it in an
                 * unknown state
                 */
                udelay(TIMEOUT_US);
                return 0;
        }

        if (sc->gds_hw_ctrl) {
                /*
                 * The gds hw controller asserts/de-asserts the status bit soon
                 * after it receives a power on/off request from a master.
                 * The controller then takes around 8 xo cycles to start its
                 * internal state machine and update the status bit. During
                 * this time, the status bit does not reflect the true status
                 * of the core.
                 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
                 * and polling the status bit.
                 */
                udelay(1);
        }

        ret = gdsc_poll_status(sc, status);
        WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");

        if (!ret && status == GDSC_OFF && sc->rsupply) {
                ret = regulator_disable(sc->rsupply);
                if (ret < 0)
                        return ret;
        }

        return ret;
}

static inline int gdsc_deassert_reset(struct gdsc *sc)
{
        int i;

        for (i = 0; i < sc->reset_count; i++)
                sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]);
        return 0;
}

static inline int gdsc_assert_reset(struct gdsc *sc)
{
        int i;

        for (i = 0; i < sc->reset_count; i++)
                sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]);
        return 0;
}

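/*
 * Set RETAIN_MEM (and, unless NO_RET_PERIPH is set, RETAIN_PERIPH) in
 * every register listed in sc->cxcs[] so the memories keep their contents
 * while the GDSC is collapsed.
 */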
static inline void gdsc_force_mem_on(struct gdsc *sc)
{
        int i;
        u32 mask = RETAIN_MEM;

        if (!(sc->flags & NO_RET_PERIPH))
                mask |= RETAIN_PERIPH;

        for (i = 0; i < sc->cxc_count; i++)
                regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
}

static inline void gdsc_clear_mem_on(struct gdsc *sc)
{
        int i;
        u32 mask = RETAIN_MEM;

        if (!(sc->flags & NO_RET_PERIPH))
                mask |= RETAIN_PERIPH;

        for (i = 0; i < sc->cxc_count; i++)
                regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
}

static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
{
        regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
                           GMEM_CLAMP_IO_MASK, 0);
}

static inline void gdsc_assert_clamp_io(struct gdsc *sc)
{
        regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
                           GMEM_CLAMP_IO_MASK, 1);
}

static inline void gdsc_assert_reset_aon(struct gdsc *sc)
{
        regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
                           GMEM_RESET_MASK, 1);
        udelay(1);
        regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
                           GMEM_RESET_MASK, 0);
}

static void gdsc_retain_ff_on(struct gdsc *sc)
{
        u32 mask = GDSC_RETAIN_FF_ENABLE;

        regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}

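/* genpd ->power_on callback: bring the GDSC and its memories up */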
static int gdsc_enable(struct generic_pm_domain *domain)
{
        struct gdsc *sc = domain_to_gdsc(domain);
        int ret;

        if (sc->pwrsts == PWRSTS_ON)
                return gdsc_deassert_reset(sc);

        if (sc->flags & SW_RESET) {
                gdsc_assert_reset(sc);
                udelay(1);
                gdsc_deassert_reset(sc);
        }

        if (sc->flags & CLAMP_IO) {
                if (sc->flags & AON_RESET)
                        gdsc_assert_reset_aon(sc);
                gdsc_deassert_clamp_io(sc);
        }

        ret = gdsc_toggle_logic(sc, GDSC_ON, false);
        if (ret)
                return ret;

        if (sc->pwrsts & PWRSTS_OFF)
                gdsc_force_mem_on(sc);

        /*
         * If clocks to this power domain were already on, they will take an
         * additional 4 clock cycles to re-enable after the power domain is
         * enabled. Delay to account for this. A delay is also needed to ensure
         * clocks are not enabled within 400ns of enabling power to the
         * memories.
         */
        udelay(1);

        /* Turn on HW trigger mode if supported */
        if (sc->flags & HW_CTRL) {
                ret = gdsc_hwctrl(sc, true);
                if (ret)
                        return ret;
                /*
                 * Wait for the GDSC to go through a power down and
                 * up cycle. If firmware ends up polling the status
                 * bits for the gdsc, it might read an 'on' status before
                 * the GDSC can finish the power cycle.
                 * We wait 1us before returning to ensure the firmware
                 * can't immediately poll the status bits.
                 */
                udelay(1);
        }

        if (sc->flags & RETAIN_FF_ENABLE)
                gdsc_retain_ff_on(sc);

        return 0;
}

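/* genpd ->power_off callback: drop the vote and collapse the GDSC */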
static int gdsc_disable(struct generic_pm_domain *domain)
{
        struct gdsc *sc = domain_to_gdsc(domain);
        int ret;

        if (sc->pwrsts == PWRSTS_ON)
                return gdsc_assert_reset(sc);

        /* Turn off HW trigger mode if supported */
        if (sc->flags & HW_CTRL) {
                ret = gdsc_hwctrl(sc, false);
                if (ret < 0)
                        return ret;
                /*
                 * Wait for the GDSC to go through a power down and
                 * up cycle. If we poll the status bits for the gdsc
                 * before the power cycle is completed, we might wrongly
                 * read an 'on' status.
                 */
                udelay(1);

                ret = gdsc_poll_status(sc, GDSC_ON);
                if (ret)
                        return ret;
        }

        if (sc->pwrsts & PWRSTS_OFF)
                gdsc_clear_mem_on(sc);

        /*
         * If the GDSC supports only a Retention state, apart from ON,
         * leave it in ON state.
         * There is no SW control to transition the GDSC into
         * Retention state. This happens in HW when the parent
         * domain goes down to a low power state
         */
        if (sc->pwrsts == PWRSTS_RET_ON)
                return 0;

        ret = gdsc_toggle_logic(sc, GDSC_OFF, domain->synced_poweroff);
        if (ret)
                return ret;

        if (sc->flags & CLAMP_IO)
                gdsc_assert_clamp_io(sc);

        return 0;
}

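/*
 * One-time setup at registration: program the state-machine wait values,
 * sync the kernel's view (regulator vote, collapse vote, retention bits)
 * with whatever state the GDSC is already in, and register the genpd.
 */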
static int gdsc_init(struct gdsc *sc)
{
        u32 mask, val;
        int on, ret;

        /*
         * Disable HW trigger: collapse/restore occur based on register writes.
         * Disable SW override: use the hardware state-machine for sequencing.
         * Configure the wait time between states.
         */
        mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
               EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;

        if (!sc->en_rest_wait_val)
                sc->en_rest_wait_val = EN_REST_WAIT_VAL;
        if (!sc->en_few_wait_val)
                sc->en_few_wait_val = EN_FEW_WAIT_VAL;
        if (!sc->clk_dis_wait_val)
                sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;

        val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
              sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
              sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;

        ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
        if (ret)
                return ret;

        /* Force gdsc ON if only ON state is supported */
        if (sc->pwrsts == PWRSTS_ON) {
                ret = gdsc_toggle_logic(sc, GDSC_ON, false);
                if (ret)
                        return ret;
        }

        on = gdsc_check_status(sc, GDSC_ON);
        if (on < 0)
                return on;

        if (on) {
                /* The regulator must be on, sync the kernel state */
                if (sc->rsupply) {
                        ret = regulator_enable(sc->rsupply);
                        if (ret < 0)
                                return ret;
                }

                /*
                 * Votable GDSCs can be ON due to votes from other masters.
                 * If a votable GDSC is ON, make sure we have a vote.
                 */
                if (sc->flags & VOTABLE) {
                        ret = gdsc_update_collapse_bit(sc, false);
                        if (ret)
                                goto err_disable_supply;
                }

                /* Turn on HW trigger mode if supported */
                if (sc->flags & HW_CTRL) {
                        ret = gdsc_hwctrl(sc, true);
                        if (ret < 0)
                                goto err_disable_supply;
                }

                /*
                 * Make sure the retain bit is set if the GDSC is already on,
                 * otherwise we end up turning off the GDSC and destroying all
                 * the register contents that we thought we were saving.
                 */
                if (sc->flags & RETAIN_FF_ENABLE)
                        gdsc_retain_ff_on(sc);
        } else if (sc->flags & ALWAYS_ON) {
                /* If ALWAYS_ON GDSCs are not ON, turn them ON */
                gdsc_enable(&sc->pd);
                on = true;
        }

        if (on || (sc->pwrsts & PWRSTS_RET))
                gdsc_force_mem_on(sc);
        else
                gdsc_clear_mem_on(sc);

        if (sc->flags & ALWAYS_ON)
                sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
        if (!sc->pd.power_off)
                sc->pd.power_off = gdsc_disable;
        if (!sc->pd.power_on)
                sc->pd.power_on = gdsc_enable;

        ret = pm_genpd_init(&sc->pd, NULL, !on);
        if (ret)
                goto err_disable_supply;

        return 0;

err_disable_supply:
        if (on && sc->rsupply)
                regulator_disable(sc->rsupply);

        return ret;
}

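/**
 * gdsc_register() - initialize and register a set of GDSC power domains
 * @desc: description of the device and its array of gdscs
 * @rcdev: reset controller used by PWRSTS_ON (reset-only) domains
 * @regmap: regmap through which all GDSC registers are accessed
 *
 * Returns 0 on success or a negative errno.
 */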
int gdsc_register(struct gdsc_desc *desc,
                  struct reset_controller_dev *rcdev, struct regmap *regmap)
{
        int i, ret;
        struct genpd_onecell_data *data;
        struct device *dev = desc->dev;
        struct gdsc **scs = desc->scs;
        size_t num = desc->num;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
                                     GFP_KERNEL);
        if (!data->domains)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                if (!scs[i] || !scs[i]->supply)
                        continue;

                scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
                if (IS_ERR(scs[i]->rsupply))
                        return PTR_ERR(scs[i]->rsupply);
        }

        data->num_domains = num;
        for (i = 0; i < num; i++) {
                if (!scs[i])
                        continue;
                scs[i]->regmap = regmap;
                scs[i]->rcdev = rcdev;
                ret = gdsc_init(scs[i]);
                if (ret)
                        return ret;
                data->domains[i] = &scs[i]->pd;
        }

        /* Add subdomains */
        for (i = 0; i < num; i++) {
                if (!scs[i])
                        continue;
                if (scs[i]->parent)
                        pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
                else if (!IS_ERR_OR_NULL(dev->pm_domain))
                        pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
        }

        return of_genpd_add_provider_onecell(dev->of_node, data);
}

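/**
 * gdsc_unregister() - undo gdsc_register(): detach the subdomains and
 *                     remove the genpd provider
 * @desc: the same descriptor that was passed to gdsc_register()
 */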
void gdsc_unregister(struct gdsc_desc *desc)
{
        int i;
        struct device *dev = desc->dev;
        struct gdsc **scs = desc->scs;
        size_t num = desc->num;

        /* Remove subdomains */
        for (i = 0; i < num; i++) {
                if (!scs[i])
                        continue;
                if (scs[i]->parent)
                        pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
                else if (!IS_ERR_OR_NULL(dev->pm_domain))
                        pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
        }
        of_genpd_del_provider(dev->of_node);
}

/*
 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
 * running in the CX domain so the CPU doesn't need to know anything about the
 * GX domain EXCEPT....
 *
 * Hardware constraints dictate that the GX be powered down before the CX. If
 * the GMU crashes it could leave the GX on. In order to successfully bring back
 * the device the CPU needs to disable the GX headswitch. There being no sane
 * way to reach in and touch that register from deep inside the GPU driver we
 * need to set up the infrastructure to be able to ensure that the GPU can
 * ensure that the GX is off during this super special case. We do this by
 * defining a GX gdsc with a dummy enable function and a "default" disable
 * function.
 *
 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
 * driver. During power up, nothing will happen from the CPU (the GMU will
 * power up normally), but during power down this will ensure that the GX
 * domain is *really* off - this gives us a semi-standard way of doing what
 * we need.
 */
int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
{
        /* Do nothing but give genpd the impression that we were successful */
        return 0;
}
EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);
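
/*
 * Illustrative use (a sketch mirroring how GPU clock-controller drivers
 * such as gpucc-sdm845 wire this up; the offsets and flags below are
 * examples, not requirements): the provider installs the no-op enable as
 * the genpd ->power_on callback, so gdsc_init() keeps it and only fills in
 * the default gdsc_disable() for ->power_off.
 *
 *      static struct gdsc gpu_gx_gdsc = {
 *              .gdscr = 0x1094,
 *              .clamp_io_ctrl = 0x130,
 *              .pd = {
 *                      .name = "gpu_gx_gdsc",
 *                      .power_on = gdsc_gx_do_nothing_enable,
 *              },
 *              .pwrsts = PWRSTS_OFF_ON,
 *              .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
 *      };
 *
 * genpd then treats power-up as a no-op (the GMU handles it) while the
 * power-down path still forces the GX headswitch off.
 */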
564 | |