// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>

#define LUT_MAX_ENTRIES			40U
#define LUT_SRC				GENMASK(31, 30)
#define LUT_L_VAL			GENMASK(7, 0)
#define LUT_CORE_COUNT			GENMASK(18, 16)
#define LUT_VOLT			GENMASK(11, 0)
#define CLK_HW_DIV			2
#define LUT_TURBO_IND			1

#define GT_IRQ_STATUS			BIT(2)

#define MAX_FREQ_DOMAINS		4

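/*
 * Per-SoC register layout. The original OSM-based hardware ("qcom,cpufreq-hw")
 * and the newer EPSS block ("qcom,cpufreq-epss") provide the same
 * functionality at different register offsets and LUT row strides.
 */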
struct qcom_cpufreq_soc_data {
	u32 reg_enable;
	u32 reg_domain_state;
	u32 reg_dcvs_ctrl;
	u32 reg_freq_lut;
	u32 reg_volt_lut;
	u32 reg_intr_clr;
	u32 reg_current_vote;
	u32 reg_perf_state;
	u8 lut_row_size;
};

struct qcom_cpufreq_data {
	void __iomem *base;

	/*
	 * Mutex to synchronize between de-init sequence and re-starting LMh
	 * polling/interrupts
	 */
	struct mutex throttle_lock;
	int throttle_irq;
	char irq_name[15];
	bool cancel_throttle;
	struct delayed_work throttle_work;
	struct cpufreq_policy *policy;
	struct clk_hw cpu_clk;

	bool per_core_dcvs;
};

static struct {
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
} qcom_cpufreq;

static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;

static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
			       unsigned long freq_khz)
{
	unsigned long freq_hz = freq_khz * 1000;
	struct dev_pm_opp *opp;
	struct device *dev;
	int ret;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);
	return ret;
}

static int qcom_cpufreq_update_opp(struct device *cpu_dev,
				   unsigned long freq_khz,
				   unsigned long volt)
{
	unsigned long freq_hz = freq_khz * 1000;
	int ret;

	/* Skip voltage update if the opp table is not available */
	if (!icc_scaling_enabled)
		return dev_pm_opp_add(cpu_dev, freq_hz, volt);

	ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
	if (ret) {
		dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
		return ret;
	}

	return dev_pm_opp_enable(cpu_dev, freq_hz);
}

static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
					unsigned int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
	unsigned long freq = policy->freq_table[index].frequency;
	unsigned int i;

	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	if (icc_scaling_enabled)
		qcom_cpufreq_set_bw(policy, freq);

	return 0;
}

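/*
 * LMh reports its throttle ceiling as an L-value: a multiple of the XO
 * reference clock (typically 19.2 MHz on these SoCs). Older OSM hardware
 * exposes it in the "current vote" register, EPSS in the domain-state
 * register.
 */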
static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
	unsigned int lval;

	if (qcom_cpufreq.soc_data->reg_current_vote)
		lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_current_vote) & 0x3ff;
	else
		lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_domain_state) & 0xff;

	return lval * xo_rate;
}

/* Get the frequency requested by the cpufreq core for the CPU */
static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
	struct cpufreq_policy *policy;
	unsigned int index;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;
	soc_data = qcom_cpufreq.soc_data;

	index = readl_relaxed(data->base + soc_data->reg_perf_state);
	index = min(index, LUT_MAX_ENTRIES - 1);

	return policy->freq_table[index].frequency;
}

static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	struct cpufreq_policy *policy;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;

	if (data->throttle_irq >= 0)
		return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;

	return qcom_cpufreq_get_freq(cpu);
}

static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
	unsigned int index;
	unsigned int i;

	index = policy->cached_resolved_idx;
	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	return policy->freq_table[index].frequency;
}

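/*
 * Walk the hardware LUT and build the cpufreq frequency table. When a DT OPP
 * table is present (interconnect scaling case) every LUT entry is
 * cross-checked against it; otherwise OPPs are created dynamically from the
 * LUT. Two consecutive identical frequencies mark the end of the LUT, and the
 * last such entry may be flagged as a boost frequency.
 */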
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
				    struct cpufreq_policy *policy)
{
	u32 data, src, lval, i, core_count, prev_freq = 0, freq;
	u32 volt;
	struct cpufreq_frequency_table *table;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;
	struct qcom_cpufreq_data *drv_data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;

	table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (!ret) {
		/* Disable all opps and cross-validate against LUT later */
		icc_scaling_enabled = true;
		for (rate = 0; ; rate++) {
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
			if (IS_ERR(opp))
				break;

			dev_pm_opp_put(opp);
			dev_pm_opp_disable(cpu_dev, rate);
		}
	} else if (ret != -ENODEV) {
		dev_err(cpu_dev, "Invalid opp table in device tree\n");
		kfree(table);
		return ret;
	} else {
		policy->fast_switch_possible = true;
		icc_scaling_enabled = false;
	}

	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
		data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
				     i * soc_data->lut_row_size);
		src = FIELD_GET(LUT_SRC, data);
		lval = FIELD_GET(LUT_L_VAL, data);
		core_count = FIELD_GET(LUT_CORE_COUNT, data);

		data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
				     i * soc_data->lut_row_size);
		volt = FIELD_GET(LUT_VOLT, data) * 1000;

		if (src)
			freq = xo_rate * lval / 1000;
		else
			freq = cpu_hw_rate / 1000;

		if (freq != prev_freq && core_count != LUT_TURBO_IND) {
			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
				table[i].frequency = freq;
				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
					freq, core_count);
			} else {
				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
				table[i].frequency = CPUFREQ_ENTRY_INVALID;
			}

		} else if (core_count == LUT_TURBO_IND) {
			table[i].frequency = CPUFREQ_ENTRY_INVALID;
		}

		/*
		 * Two of the same frequencies with the same core counts means
		 * end of table
		 */
		if (i > 0 && prev_freq == freq) {
			struct cpufreq_frequency_table *prev = &table[i - 1];

			/*
			 * Only treat the last frequency that might be a boost
			 * as the boost frequency
			 */
			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
					prev->frequency = prev_freq;
					prev->flags = CPUFREQ_BOOST_FREQ;
				} else {
					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
						 freq);
				}
			}

			break;
		}

		prev_freq = freq;
	}

	table[i].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;
	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

	return 0;
}

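/* Collect every CPU whose "qcom,freq-domain" phandle refers to this domain index */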
static void qcom_get_related_cpus(int index, struct cpumask *m)
{
	struct device_node *cpu_np;
	struct of_phandle_args args;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np)
			continue;

		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
						 "#freq-domain-cells", 0,
						 &args);
		of_node_put(cpu_np);
		if (ret < 0)
			continue;

		if (index == args.args[0])
			cpumask_set_cpu(cpu, m);
	}
}

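/*
 * LMh throttling uses an interrupt-then-poll scheme: the interrupt fires when
 * the hardware starts limiting the domain, the handler disables it and kicks
 * the delayed work, and the work keeps polling (updating thermal pressure as
 * it goes) until the throttled frequency recovers to at least the requested
 * one, at which point the interrupt is re-enabled.
 */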
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
	struct cpufreq_policy *policy = data->policy;
	int cpu = cpumask_first(policy->related_cpus);
	struct device *dev = get_cpu_device(cpu);
	unsigned long freq_hz, throttled_freq;
	struct dev_pm_opp *opp;

	/*
	 * Get the h/w throttled frequency, normalize it using the
	 * registered opp table and use it to calculate thermal pressure.
	 */
	freq_hz = qcom_lmh_get_throttle_freq(data);

	opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
		opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);

	if (IS_ERR(opp)) {
		dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
	} else {
		dev_pm_opp_put(opp);
	}

	throttled_freq = freq_hz / HZ_PER_KHZ;

	/* Update thermal pressure (the boost frequencies are accepted) */
	arch_update_thermal_pressure(policy->related_cpus, throttled_freq);

	/*
	 * In the unlikely case policy is unregistered do not enable
	 * polling or h/w interrupt
	 */
	mutex_lock(&data->throttle_lock);
	if (data->cancel_throttle)
		goto out;

	/*
	 * If h/w throttled frequency is higher than what cpufreq has requested
	 * for, then stop polling and switch back to interrupt mechanism.
	 */
	if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
		enable_irq(data->throttle_irq);
	else
		mod_delayed_work(system_highpri_wq, &data->throttle_work,
				 msecs_to_jiffies(10));

out:
	mutex_unlock(&data->throttle_lock);
}

static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
	struct qcom_cpufreq_data *data;

	data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
	qcom_lmh_dcvs_notify(data);
}

static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
	struct qcom_cpufreq_data *c_data = data;

	/* Disable interrupt and enable polling */
	disable_irq_nosync(c_data->throttle_irq);
	schedule_delayed_work(&c_data->throttle_work, 0);

	if (qcom_cpufreq.soc_data->reg_intr_clr)
		writel_relaxed(GT_IRQ_STATUS,
			       c_data->base + qcom_cpufreq.soc_data->reg_intr_clr);

	return IRQ_HANDLED;
}

static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_dcvs_ctrl = 0xbc,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_current_vote = 0x704,
	.reg_perf_state = 0x920,
	.lut_row_size = 32,
};

static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.lut_row_size = 4,
};

static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);

static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for LMh interrupt. If no interrupt line is specified /
	 * if there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	data->cancel_throttle = false;
	data->policy = policy;

	mutex_init(&data->throttle_lock);
	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

	snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
	ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
	if (ret) {
		dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
		return 0;
	}

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return 0;
}

static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = false;
	mutex_unlock(&data->throttle_lock);

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return ret;
}

static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = true;
	mutex_unlock(&data->throttle_lock);

	cancel_delayed_work_sync(&data->throttle_work);
	irq_set_affinity_and_hint(data->throttle_irq, NULL);
	disable_irq_nosync(data->throttle_irq);

	return 0;
}

static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
	if (data->throttle_irq <= 0)
		return;

	free_irq(data->throttle_irq, data);
}

static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
	struct platform_device *pdev = cpufreq_get_driver_data();
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	struct device_node *cpu_np;
	struct device *cpu_dev;
	struct qcom_cpufreq_data *data;
	int ret, index;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	cpu_np = of_cpu_device_node_get(policy->cpu);
	if (!cpu_np)
		return -EINVAL;

	ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
					 "#freq-domain-cells", 0, &args);
	of_node_put(cpu_np);
	if (ret)
		return ret;

	index = args.args[0];
	data = &qcom_cpufreq.data[index];

	/* HW should be in enabled state to proceed */
	if (!(readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_enable) & 0x1)) {
		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
		return -ENODEV;
	}

	if (readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_dcvs_ctrl) & 0x1)
		data->per_core_dcvs = true;

	qcom_get_related_cpus(index, policy->cpus);

	policy->driver_data = data;
	policy->dvfs_possible_from_any_cpu = true;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {
		dev_err(dev, "Domain-%d failed to read LUT\n", index);
		return ret;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "Failed to add OPPs\n");
		return -ENODEV;
	}

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret)
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
	}

	return qcom_cpufreq_hw_lmh_init(policy, index);
}

static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	qcom_cpufreq_hw_lmh_exit(data);
	kfree(policy->freq_table);
	kfree(data);

	return 0;
}

static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq >= 0)
		enable_irq(data->throttle_irq);
}

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK |
			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
			  CPUFREQ_IS_COOLING_DEV,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= qcom_cpufreq_hw_target_index,
	.get		= qcom_cpufreq_hw_get,
	.init		= qcom_cpufreq_hw_cpu_init,
	.exit		= qcom_cpufreq_hw_cpu_exit,
	.online		= qcom_cpufreq_hw_cpu_online,
	.offline	= qcom_cpufreq_hw_cpu_offline,
	.register_em	= cpufreq_register_em_with_opp,
	.fast_switch	= qcom_cpufreq_hw_fast_switch,
	.name		= "qcom-cpufreq-hw",
	.attr		= qcom_cpufreq_hw_attr,
	.ready		= qcom_cpufreq_ready,
};

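/*
 * Each frequency domain is also exposed as a read-only clock: recalc_rate()
 * reports the current hardware (LMh/domain-state) frequency, and the clock is
 * registered with CLK_GET_RATE_NOCACHE so every clk_get_rate() call reads the
 * register again instead of returning a cached value.
 */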
static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);

	return qcom_lmh_get_throttle_freq(data);
}

static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
	.recalc_rate = qcom_cpufreq_hw_recalc_rate,
};

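/*
 * Probe order: resolve the "xo" and "alternate" reference clocks, look for
 * optional interconnect paths on CPU0, map one register region per frequency
 * domain and register a clock for it, then register the cpufreq driver itself.
 */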
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &pdev->dev;
	struct device *cpu_dev;
	struct clk *clk;
	int ret, i, num_domains;

	clk = clk_get(dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	clk = clk_get(dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to find icc paths\n");

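	/* Count frequency domains from the register regions, capped at MAX_FREQ_DOMAINS */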
	for (num_domains = 0; num_domains < MAX_FREQ_DOMAINS; num_domains++)
		if (!platform_get_resource(pdev, IORESOURCE_MEM, num_domains))
			break;

	qcom_cpufreq.data = devm_kzalloc(dev, sizeof(struct qcom_cpufreq_data) * num_domains,
					 GFP_KERNEL);
	if (!qcom_cpufreq.data)
		return -ENOMEM;

	qcom_cpufreq.soc_data = of_device_get_match_data(dev);
	if (!qcom_cpufreq.soc_data)
		return -ENODEV;

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = num_domains;

	for (i = 0; i < num_domains; i++) {
		struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i];
		struct clk_init_data clk_init = {};
		void __iomem *base;

		base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(base)) {
			dev_err(dev, "Failed to map resource index %d\n", i);
			return PTR_ERR(base);
		}

		data->base = base;

		/* Register CPU clock for each frequency domain */
		clk_init.name = kasprintf(GFP_KERNEL, "qcom_cpufreq%d", i);
		if (!clk_init.name)
			return -ENOMEM;

		clk_init.flags = CLK_GET_RATE_NOCACHE;
		clk_init.ops = &qcom_cpufreq_hw_clk_ops;
		data->cpu_clk.init = &clk_init;

		ret = devm_clk_hw_register(dev, &data->cpu_clk);
		if (ret < 0) {
			dev_err(dev, "Failed to register clock %d: %d\n", i, ret);
			kfree(clk_init.name);
			return ret;
		}

		clk_data->hws[i] = &data->cpu_clk;
		kfree(clk_init.name);
	}

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
	if (ret < 0) {
		dev_err(dev, "Failed to add clock provider\n");
		return ret;
	}

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret)
		dev_err(dev, "CPUFreq HW driver failed to register\n");
	else
		dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n");

	return ret;
}

static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}

static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove_new = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};

static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);

static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);

MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
MODULE_LICENSE("GPL v2");