1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers. |
4 | * |
5 | * (C) Copyright 2014, 2015 Linaro Ltd. |
6 | * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> |
7 | * |
8 | * CPPC describes a few methods for controlling CPU performance using |
9 | * information from a per CPU table called CPC. This table is described in |
10 | * the ACPI v5.0+ specification. The table consists of a list of |
11 | * registers which may be memory mapped or hardware registers and also may |
12 | * include some static integer values. |
13 | * |
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief, the basic
16 | * operation involves: |
17 | * |
18 | * - OS makes a CPU performance request. (Can provide min and max bounds) |
19 | * |
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
22 | * |
23 | * - Platform conveys its decision back to OS |
24 | * |
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic mailbox-like
 * mechanism that includes doorbell semantics to indicate register updates.
28 | * See drivers/mailbox/pcc.c for details on PCC. |
29 | * |
30 | * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and |
31 | * above specifications. |
32 | */ |
33 | |
34 | #define pr_fmt(fmt) "ACPI CPPC: " fmt |
35 | |
36 | #include <linux/delay.h> |
37 | #include <linux/iopoll.h> |
38 | #include <linux/ktime.h> |
39 | #include <linux/rwsem.h> |
40 | #include <linux/wait.h> |
41 | #include <linux/topology.h> |
42 | |
43 | #include <acpi/cppc_acpi.h> |
44 | |
45 | struct cppc_pcc_data { |
46 | struct pcc_mbox_chan *pcc_channel; |
47 | void __iomem *pcc_comm_addr; |
48 | bool pcc_channel_acquired; |
49 | unsigned int deadline_us; |
50 | unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; |
51 | |
52 | bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */ |
53 | bool platform_owns_pcc; /* Ownership of PCC subspace */ |
54 | unsigned int pcc_write_cnt; /* Running count of PCC write commands */ |
55 | |
56 | /* |
57 | * Lock to provide controlled access to the PCC channel. |
58 | * |
	 * For performance-critical use cases (currently cppc_set_perf):
	 * we need to take the read_lock and check if the channel belongs to
	 * OSPM before reading or writing to the PCC subspace, and we need to
	 * take the write_lock before transferring channel ownership to the
	 * platform via a doorbell. This allows us to batch a number of CPPC
	 * requests if they happen to originate at about the same time.
	 *
	 * For non-performance-critical use cases (init):
	 * take the write_lock for all purposes, which gives exclusive access.
69 | */ |
70 | struct rw_semaphore pcc_lock; |
71 | |
72 | /* Wait queue for CPUs whose requests were batched */ |
73 | wait_queue_head_t pcc_write_wait_q; |
74 | ktime_t last_cmd_cmpl_time; |
75 | ktime_t last_mpar_reset; |
76 | int mpar_count; |
77 | int refcount; |
78 | }; |
79 | |
80 | /* Array to represent the PCC channel per subspace ID */ |
81 | static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES]; |
82 | /* The cpu_pcc_subspace_idx contains per CPU subspace ID */ |
83 | static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx); |
84 | |
85 | /* |
86 | * The cpc_desc structure contains the ACPI register details |
87 | * as described in the per CPU _CPC tables. The details |
88 | * include the type of register (e.g. PCC, System IO, FFH etc.) |
89 | * and destination addresses which lets us READ/WRITE CPU performance |
90 | * information using the appropriate I/O methods. |
91 | */ |
92 | static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); |
93 | |
94 | /* pcc mapped address + header size + offset within PCC subspace */ |
95 | #define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \ |
96 | 0x8 + (offs)) |
97 | |
98 | /* Check if a CPC register is in PCC */ |
99 | #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ |
100 | (cpc)->cpc_entry.reg.space_id == \ |
101 | ACPI_ADR_SPACE_PLATFORM_COMM) |
102 | |
103 | /* Check if a CPC register is in SystemMemory */ |
104 | #define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ |
105 | (cpc)->cpc_entry.reg.space_id == \ |
106 | ACPI_ADR_SPACE_SYSTEM_MEMORY) |
107 | |
108 | /* Check if a CPC register is in SystemIo */ |
109 | #define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ |
110 | (cpc)->cpc_entry.reg.space_id == \ |
111 | ACPI_ADR_SPACE_SYSTEM_IO) |
112 | |
113 | /* Evaluates to True if reg is a NULL register descriptor */ |
114 | #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \ |
115 | (reg)->address == 0 && \ |
116 | (reg)->bit_width == 0 && \ |
117 | (reg)->bit_offset == 0 && \ |
118 | (reg)->access_width == 0) |
119 | |
120 | /* Evaluates to True if an optional cpc field is supported */ |
121 | #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \ |
122 | !!(cpc)->cpc_entry.int_value : \ |
123 | !IS_NULL_REG(&(cpc)->cpc_entry.reg)) |
124 | /* |
125 | * Arbitrary Retries in case the remote processor is slow to respond |
126 | * to PCC commands. Keeping it high enough to cover emulators where |
127 | * the processors run painfully slow. |
128 | */ |
129 | #define NUM_RETRIES 500ULL |
130 | |
131 | #define OVER_16BTS_MASK ~0xFFFFULL |
132 | |
133 | #define define_one_cppc_ro(_name) \ |
134 | static struct kobj_attribute _name = \ |
135 | __ATTR(_name, 0444, show_##_name, NULL) |
136 | |
137 | #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj) |
138 | |
139 | #define show_cppc_data(access_fn, struct_name, member_name) \ |
140 | static ssize_t show_##member_name(struct kobject *kobj, \ |
141 | struct kobj_attribute *attr, char *buf) \ |
142 | { \ |
143 | struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \ |
144 | struct struct_name st_name = {0}; \ |
145 | int ret; \ |
146 | \ |
147 | ret = access_fn(cpc_ptr->cpu_id, &st_name); \ |
148 | if (ret) \ |
149 | return ret; \ |
150 | \ |
151 | return sysfs_emit(buf, "%llu\n", \ |
152 | (u64)st_name.member_name); \ |
153 | } \ |
154 | define_one_cppc_ro(member_name) |
155 | |
156 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf); |
157 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf); |
158 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf); |
159 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf); |
160 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq); |
161 | show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq); |
162 | |
163 | show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf); |
164 | show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); |
165 | |
166 | static ssize_t show_feedback_ctrs(struct kobject *kobj, |
167 | struct kobj_attribute *attr, char *buf) |
168 | { |
169 | struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); |
170 | struct cppc_perf_fb_ctrs fb_ctrs = {0}; |
171 | int ret; |
172 | |
	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
174 | if (ret) |
175 | return ret; |
176 | |
	return sysfs_emit(buf, "ref:%llu del:%llu\n",
178 | fb_ctrs.reference, fb_ctrs.delivered); |
179 | } |
180 | define_one_cppc_ro(feedback_ctrs); |
181 | |
182 | static struct attribute *cppc_attrs[] = { |
183 | &feedback_ctrs.attr, |
184 | &reference_perf.attr, |
185 | &wraparound_time.attr, |
186 | &highest_perf.attr, |
187 | &lowest_perf.attr, |
188 | &lowest_nonlinear_perf.attr, |
189 | &nominal_perf.attr, |
190 | &nominal_freq.attr, |
191 | &lowest_freq.attr, |
192 | NULL |
193 | }; |
194 | ATTRIBUTE_GROUPS(cppc); |
195 | |
196 | static const struct kobj_type cppc_ktype = { |
197 | .sysfs_ops = &kobj_sysfs_ops, |
198 | .default_groups = cppc_groups, |
199 | }; |
200 | |
201 | static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit) |
202 | { |
203 | int ret, status; |
204 | struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; |
205 | struct acpi_pcct_shared_memory __iomem *generic_comm_base = |
206 | pcc_ss_data->pcc_comm_addr; |
207 | |
208 | if (!pcc_ss_data->platform_owns_pcc) |
209 | return 0; |
210 | |
211 | /* |
	 * Poll the PCC status register every 3 us (delay_us) for a maximum of
	 * deadline_us (timeout_us) until the PCC command complete bit is set (cond).
214 | */ |
215 | ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status, |
216 | status & PCC_CMD_COMPLETE_MASK, 3, |
217 | pcc_ss_data->deadline_us); |
218 | |
219 | if (likely(!ret)) { |
220 | pcc_ss_data->platform_owns_pcc = false; |
221 | if (chk_err_bit && (status & PCC_ERROR_MASK)) |
222 | ret = -EIO; |
223 | } |
224 | |
225 | if (unlikely(ret)) |
226 | pr_err("PCC check channel failed for ss: %d. ret=%d\n" , |
227 | pcc_ss_id, ret); |
228 | |
229 | return ret; |
230 | } |
231 | |
232 | /* |
233 | * This function transfers the ownership of the PCC to the platform |
234 | * So it must be called while holding write_lock(pcc_lock) |
235 | */ |
236 | static int send_pcc_cmd(int pcc_ss_id, u16 cmd) |
237 | { |
238 | int ret = -EIO, i; |
239 | struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; |
240 | struct acpi_pcct_shared_memory __iomem *generic_comm_base = |
241 | pcc_ss_data->pcc_comm_addr; |
242 | unsigned int time_delta; |
243 | |
244 | /* |
245 | * For CMD_WRITE we know for a fact the caller should have checked |
246 | * the channel before writing to PCC space |
247 | */ |
248 | if (cmd == CMD_READ) { |
249 | /* |
250 | * If there are pending cpc_writes, then we stole the channel |
251 | * before write completion, so first send a WRITE command to |
252 | * platform |
253 | */ |
254 | if (pcc_ss_data->pending_pcc_write_cmd) |
255 | send_pcc_cmd(pcc_ss_id, CMD_WRITE); |
256 | |
		ret = check_pcc_chan(pcc_ss_id, false);
258 | if (ret) |
259 | goto end; |
260 | } else /* CMD_WRITE */ |
261 | pcc_ss_data->pending_pcc_write_cmd = FALSE; |
262 | |
263 | /* |
264 | * Handle the Minimum Request Turnaround Time(MRTT) |
265 | * "The minimum amount of time that OSPM must wait after the completion |
266 | * of a command before issuing the next command, in microseconds" |
267 | */ |
268 | if (pcc_ss_data->pcc_mrtt) { |
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
271 | if (pcc_ss_data->pcc_mrtt > time_delta) |
272 | udelay(pcc_ss_data->pcc_mrtt - time_delta); |
273 | } |
274 | |
275 | /* |
276 | * Handle the non-zero Maximum Periodic Access Rate(MPAR) |
277 | * "The maximum number of periodic requests that the subspace channel can |
278 | * support, reported in commands per minute. 0 indicates no limitation." |
279 | * |
280 | * This parameter should be ideally zero or large enough so that it can |
281 | * handle maximum number of requests that all the cores in the system can |
282 | * collectively generate. If it is not, we will follow the spec and just |
283 | * not send the request to the platform after hitting the MPAR limit in |
284 | * any 60s window |
285 | */ |
286 | if (pcc_ss_data->pcc_mpar) { |
287 | if (pcc_ss_data->mpar_count == 0) { |
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
290 | if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) { |
291 | pr_debug("PCC cmd for subspace %d not sent due to MPAR limit" , |
292 | pcc_ss_id); |
293 | ret = -EIO; |
294 | goto end; |
295 | } |
296 | pcc_ss_data->last_mpar_reset = ktime_get(); |
297 | pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar; |
298 | } |
299 | pcc_ss_data->mpar_count--; |
300 | } |
301 | |
302 | /* Write to the shared comm region. */ |
303 | writew_relaxed(cmd, &generic_comm_base->command); |
304 | |
305 | /* Flip CMD COMPLETE bit */ |
306 | writew_relaxed(0, &generic_comm_base->status); |
307 | |
308 | pcc_ss_data->platform_owns_pcc = true; |
309 | |
310 | /* Ring doorbell */ |
	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
312 | if (ret < 0) { |
313 | pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n" , |
314 | pcc_ss_id, cmd, ret); |
315 | goto end; |
316 | } |
317 | |
318 | /* wait for completion and check for PCC error bit */ |
	ret = check_pcc_chan(pcc_ss_id, true);
320 | |
321 | if (pcc_ss_data->pcc_mrtt) |
322 | pcc_ss_data->last_cmd_cmpl_time = ktime_get(); |
323 | |
324 | if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq) |
		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
328 | |
329 | end: |
330 | if (cmd == CMD_WRITE) { |
331 | if (unlikely(ret)) { |
332 | for_each_possible_cpu(i) { |
333 | struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i); |
334 | |
335 | if (!desc) |
336 | continue; |
337 | |
338 | if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt) |
339 | desc->write_cmd_status = ret; |
340 | } |
341 | } |
342 | pcc_ss_data->pcc_write_cnt++; |
343 | wake_up_all(&pcc_ss_data->pcc_write_wait_q); |
344 | } |
345 | |
346 | return ret; |
347 | } |
348 | |
349 | static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret) |
350 | { |
351 | if (ret < 0) |
352 | pr_debug("TX did not complete: CMD sent:%x, ret:%d\n" , |
353 | *(u16 *)msg, ret); |
354 | else |
355 | pr_debug("TX completed. CMD sent:%x, ret:%d\n" , |
356 | *(u16 *)msg, ret); |
357 | } |
358 | |
359 | static struct mbox_client cppc_mbox_cl = { |
360 | .tx_done = cppc_chan_tx_done, |
361 | .knows_txdone = true, |
362 | }; |
363 | |
364 | static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle) |
365 | { |
366 | int result = -EFAULT; |
367 | acpi_status status = AE_OK; |
368 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; |
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
370 | struct acpi_buffer state = {0, NULL}; |
371 | union acpi_object *psd = NULL; |
372 | struct acpi_psd_package *pdomain; |
373 | |
374 | status = acpi_evaluate_object_typed(object: handle, pathname: "_PSD" , NULL, |
375 | return_buffer: &buffer, ACPI_TYPE_PACKAGE); |
376 | if (status == AE_NOT_FOUND) /* _PSD is optional */ |
377 | return 0; |
378 | if (ACPI_FAILURE(status)) |
379 | return -ENODEV; |
380 | |
381 | psd = buffer.pointer; |
382 | if (!psd || psd->package.count != 1) { |
383 | pr_debug("Invalid _PSD data\n" ); |
384 | goto end; |
385 | } |
386 | |
387 | pdomain = &(cpc_ptr->domain_info); |
388 | |
389 | state.length = sizeof(struct acpi_psd_package); |
390 | state.pointer = pdomain; |
391 | |
	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
394 | if (ACPI_FAILURE(status)) { |
395 | pr_debug("Invalid _PSD data for CPU:%d\n" , cpc_ptr->cpu_id); |
396 | goto end; |
397 | } |
398 | |
399 | if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) { |
400 | pr_debug("Unknown _PSD:num_entries for CPU:%d\n" , cpc_ptr->cpu_id); |
401 | goto end; |
402 | } |
403 | |
404 | if (pdomain->revision != ACPI_PSD_REV0_REVISION) { |
405 | pr_debug("Unknown _PSD:revision for CPU: %d\n" , cpc_ptr->cpu_id); |
406 | goto end; |
407 | } |
408 | |
409 | if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL && |
410 | pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY && |
411 | pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) { |
412 | pr_debug("Invalid _PSD:coord_type for CPU:%d\n" , cpc_ptr->cpu_id); |
413 | goto end; |
414 | } |
415 | |
416 | result = 0; |
417 | end: |
	kfree(buffer.pointer);
419 | return result; |
420 | } |
421 | |
422 | bool acpi_cpc_valid(void) |
423 | { |
424 | struct cpc_desc *cpc_ptr; |
425 | int cpu; |
426 | |
427 | if (acpi_disabled) |
428 | return false; |
429 | |
430 | for_each_present_cpu(cpu) { |
431 | cpc_ptr = per_cpu(cpc_desc_ptr, cpu); |
432 | if (!cpc_ptr) |
433 | return false; |
434 | } |
435 | |
436 | return true; |
437 | } |
438 | EXPORT_SYMBOL_GPL(acpi_cpc_valid); |
439 | |
440 | bool cppc_allow_fast_switch(void) |
441 | { |
442 | struct cpc_register_resource *desired_reg; |
443 | struct cpc_desc *cpc_ptr; |
444 | int cpu; |
445 | |
446 | for_each_possible_cpu(cpu) { |
447 | cpc_ptr = per_cpu(cpc_desc_ptr, cpu); |
448 | desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF]; |
449 | if (!CPC_IN_SYSTEM_MEMORY(desired_reg) && |
450 | !CPC_IN_SYSTEM_IO(desired_reg)) |
451 | return false; |
452 | } |
453 | |
454 | return true; |
455 | } |
456 | EXPORT_SYMBOL_GPL(cppc_allow_fast_switch); |
457 | |
458 | /** |
459 | * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu |
460 | * @cpu: Find all CPUs that share a domain with cpu. |
461 | * @cpu_data: Pointer to CPU specific CPPC data including PSD info. |
462 | * |
463 | * Return: 0 for success or negative value for err. |
464 | */ |
465 | int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data) |
466 | { |
467 | struct cpc_desc *cpc_ptr, *match_cpc_ptr; |
468 | struct acpi_psd_package *match_pdomain; |
469 | struct acpi_psd_package *pdomain; |
470 | int count_target, i; |
471 | |
472 | /* |
473 | * Now that we have _PSD data from all CPUs, let's setup P-state |
474 | * domain info. |
475 | */ |
476 | cpc_ptr = per_cpu(cpc_desc_ptr, cpu); |
477 | if (!cpc_ptr) |
478 | return -EFAULT; |
479 | |
480 | pdomain = &(cpc_ptr->domain_info); |
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
482 | if (pdomain->num_processors <= 1) |
483 | return 0; |
484 | |
485 | /* Validate the Domain info */ |
486 | count_target = pdomain->num_processors; |
487 | if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) |
488 | cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL; |
489 | else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) |
490 | cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW; |
491 | else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) |
492 | cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY; |
493 | |
494 | for_each_possible_cpu(i) { |
495 | if (i == cpu) |
496 | continue; |
497 | |
498 | match_cpc_ptr = per_cpu(cpc_desc_ptr, i); |
499 | if (!match_cpc_ptr) |
500 | goto err_fault; |
501 | |
502 | match_pdomain = &(match_cpc_ptr->domain_info); |
503 | if (match_pdomain->domain != pdomain->domain) |
504 | continue; |
505 | |
506 | /* Here i and cpu are in the same domain */ |
507 | if (match_pdomain->num_processors != count_target) |
508 | goto err_fault; |
509 | |
510 | if (pdomain->coord_type != match_pdomain->coord_type) |
511 | goto err_fault; |
512 | |
		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
514 | } |
515 | |
516 | return 0; |
517 | |
518 | err_fault: |
519 | /* Assume no coordination on any error parsing domain info */ |
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
522 | cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE; |
523 | |
524 | return -EFAULT; |
525 | } |
526 | EXPORT_SYMBOL_GPL(acpi_get_psd_map); |
527 | |
528 | static int register_pcc_channel(int pcc_ss_idx) |
529 | { |
530 | struct pcc_mbox_chan *pcc_chan; |
531 | u64 usecs_lat; |
532 | |
533 | if (pcc_ss_idx >= 0) { |
		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
535 | |
		if (IS_ERR(pcc_chan)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
538 | pcc_ss_idx); |
539 | return -ENODEV; |
540 | } |
541 | |
542 | pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan; |
543 | /* |
544 | * cppc_ss->latency is just a Nominal value. In reality |
545 | * the remote processor could be much slower to reply. |
546 | * So add an arbitrary amount of wait on top of Nominal. |
547 | */ |
548 | usecs_lat = NUM_RETRIES * pcc_chan->latency; |
549 | pcc_data[pcc_ss_idx]->deadline_us = usecs_lat; |
550 | pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time; |
551 | pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate; |
552 | pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency; |
553 | |
554 | pcc_data[pcc_ss_idx]->pcc_comm_addr = |
			acpi_os_ioremap(pcc_chan->shmem_base_addr,
					pcc_chan->shmem_size);
557 | if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) { |
558 | pr_err("Failed to ioremap PCC comm region mem for %d\n" , |
559 | pcc_ss_idx); |
560 | return -ENOMEM; |
561 | } |
562 | |
563 | /* Set flag so that we don't come here for each CPU. */ |
564 | pcc_data[pcc_ss_idx]->pcc_channel_acquired = true; |
565 | } |
566 | |
567 | return 0; |
568 | } |
569 | |
570 | /** |
571 | * cpc_ffh_supported() - check if FFH reading supported |
572 | * |
573 | * Check if the architecture has support for functional fixed hardware |
574 | * read/write capability. |
575 | * |
576 | * Return: true for supported, false for not supported |
577 | */ |
578 | bool __weak cpc_ffh_supported(void) |
579 | { |
580 | return false; |
581 | } |
582 | |
583 | /** |
584 | * cpc_supported_by_cpu() - check if CPPC is supported by CPU |
585 | * |
586 | * Check if the architectural support for CPPC is present even |
587 | * if the _OSC hasn't prescribed it |
588 | * |
589 | * Return: true for supported, false for not supported |
590 | */ |
591 | bool __weak cpc_supported_by_cpu(void) |
592 | { |
593 | return false; |
594 | } |
595 | |
596 | /** |
597 | * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace |
598 | * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package. |
599 | * |
600 | * Check and allocate the cppc_pcc_data memory. |
601 | * In some processor configurations it is possible that same subspace |
602 | * is shared between multiple CPUs. This is seen especially in CPUs |
603 | * with hardware multi-threading support. |
604 | * |
605 | * Return: 0 for success, errno for failure |
606 | */ |
607 | static int pcc_data_alloc(int pcc_ss_id) |
608 | { |
609 | if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES) |
610 | return -EINVAL; |
611 | |
612 | if (pcc_data[pcc_ss_id]) { |
613 | pcc_data[pcc_ss_id]->refcount++; |
614 | } else { |
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
616 | GFP_KERNEL); |
617 | if (!pcc_data[pcc_ss_id]) |
618 | return -ENOMEM; |
619 | pcc_data[pcc_ss_id]->refcount++; |
620 | } |
621 | |
622 | return 0; |
623 | } |
624 | |
625 | /* |
626 | * An example CPC table looks like the following. |
627 | * |
628 | * Name (_CPC, Package() { |
629 | * 17, // NumEntries |
630 | * 1, // Revision |
631 | * ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)}, // Highest Performance |
632 | * ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)}, // Nominal Performance |
633 | * ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)}, // Lowest Nonlinear Performance |
634 | * ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)}, // Lowest Performance |
635 | * ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)}, // Guaranteed Performance Register |
636 | * ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)}, // Desired Performance Register |
637 | * ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)}, |
638 | * ... |
639 | * ... |
640 | * ... |
641 | * } |
642 | * Each Register() encodes how to access that specific register. |
643 | * e.g. a sample PCC entry has the following encoding: |
644 | * |
645 | * Register ( |
646 | * PCC, // AddressSpaceKeyword |
647 | * 8, // RegisterBitWidth |
648 | * 8, // RegisterBitOffset |
649 | * 0x30, // RegisterAddress |
650 | * 9, // AccessSize (subspace ID) |
651 | * ) |
652 | */ |
653 | |
654 | #ifndef arch_init_invariance_cppc |
655 | static inline void arch_init_invariance_cppc(void) { } |
656 | #endif |
657 | |
658 | /** |
659 | * acpi_cppc_processor_probe - Search for per CPU _CPC objects. |
660 | * @pr: Ptr to acpi_processor containing this CPU's logical ID. |
661 | * |
662 | * Return: 0 for success or negative value for err. |
663 | */ |
664 | int acpi_cppc_processor_probe(struct acpi_processor *pr) |
665 | { |
666 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; |
667 | union acpi_object *out_obj, *cpc_obj; |
668 | struct cpc_desc *cpc_ptr; |
669 | struct cpc_reg *gas_t; |
670 | struct device *cpu_dev; |
671 | acpi_handle handle = pr->handle; |
672 | unsigned int num_ent, i, cpc_rev; |
673 | int pcc_subspace_id = -1; |
674 | acpi_status status; |
675 | int ret = -ENODATA; |
676 | |
677 | if (!osc_sb_cppc2_support_acked) { |
678 | pr_debug("CPPC v2 _OSC not acked\n" ); |
679 | if (!cpc_supported_by_cpu()) |
680 | return -ENODEV; |
681 | } |
682 | |
683 | /* Parse the ACPI _CPC table for this CPU. */ |
684 | status = acpi_evaluate_object_typed(object: handle, pathname: "_CPC" , NULL, return_buffer: &output, |
685 | ACPI_TYPE_PACKAGE); |
686 | if (ACPI_FAILURE(status)) { |
687 | ret = -ENODEV; |
688 | goto out_buf_free; |
689 | } |
690 | |
691 | out_obj = (union acpi_object *) output.pointer; |
692 | |
	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
694 | if (!cpc_ptr) { |
695 | ret = -ENOMEM; |
696 | goto out_buf_free; |
697 | } |
698 | |
699 | /* First entry is NumEntries. */ |
700 | cpc_obj = &out_obj->package.elements[0]; |
701 | if (cpc_obj->type == ACPI_TYPE_INTEGER) { |
702 | num_ent = cpc_obj->integer.value; |
703 | if (num_ent <= 1) { |
704 | pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n" , |
705 | num_ent, pr->id); |
706 | goto out_free; |
707 | } |
708 | } else { |
709 | pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n" , |
710 | cpc_obj->type, pr->id); |
711 | goto out_free; |
712 | } |
713 | |
714 | /* Second entry should be revision. */ |
715 | cpc_obj = &out_obj->package.elements[1]; |
716 | if (cpc_obj->type == ACPI_TYPE_INTEGER) { |
717 | cpc_rev = cpc_obj->integer.value; |
718 | } else { |
719 | pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n" , |
720 | cpc_obj->type, pr->id); |
721 | goto out_free; |
722 | } |
723 | |
724 | if (cpc_rev < CPPC_V2_REV) { |
725 | pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n" , cpc_rev, |
726 | pr->id); |
727 | goto out_free; |
728 | } |
729 | |
730 | /* |
	 * Disregard _CPC if the number of entries in the return package is not
732 | * as expected, but support future revisions being proper supersets of |
733 | * the v3 and only causing more entries to be returned by _CPC. |
734 | */ |
735 | if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) || |
736 | (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) || |
737 | (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) { |
738 | pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n" , |
739 | num_ent, pr->id); |
740 | goto out_free; |
741 | } |
742 | if (cpc_rev > CPPC_V3_REV) { |
743 | num_ent = CPPC_V3_NUM_ENT; |
744 | cpc_rev = CPPC_V3_REV; |
745 | } |
746 | |
747 | cpc_ptr->num_entries = num_ent; |
748 | cpc_ptr->version = cpc_rev; |
749 | |
750 | /* Iterate through remaining entries in _CPC */ |
751 | for (i = 2; i < num_ent; i++) { |
752 | cpc_obj = &out_obj->package.elements[i]; |
753 | |
754 | if (cpc_obj->type == ACPI_TYPE_INTEGER) { |
755 | cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER; |
756 | cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value; |
757 | } else if (cpc_obj->type == ACPI_TYPE_BUFFER) { |
758 | gas_t = (struct cpc_reg *) |
759 | cpc_obj->buffer.pointer; |
760 | |
761 | /* |
762 | * The PCC Subspace index is encoded inside |
763 | * the CPC table entries. The same PCC index |
764 | * will be used for all the PCC entries, |
765 | * so extract it only once. |
766 | */ |
767 | if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { |
768 | if (pcc_subspace_id < 0) { |
769 | pcc_subspace_id = gas_t->access_width; |
					if (pcc_data_alloc(pcc_subspace_id))
771 | goto out_free; |
772 | } else if (pcc_subspace_id != gas_t->access_width) { |
773 | pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n" , |
774 | pr->id); |
775 | goto out_free; |
776 | } |
777 | } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { |
778 | if (gas_t->address) { |
779 | void __iomem *addr; |
780 | |
781 | if (!osc_cpc_flexible_adr_space_confirmed) { |
782 | pr_debug("Flexible address space capability not supported\n" ); |
783 | if (!cpc_supported_by_cpu()) |
784 | goto out_free; |
785 | } |
786 | |
					addr = ioremap(gas_t->address, gas_t->bit_width/8);
788 | if (!addr) |
789 | goto out_free; |
790 | cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr; |
791 | } |
792 | } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { |
793 | if (gas_t->access_width < 1 || gas_t->access_width > 3) { |
794 | /* |
795 | * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit. |
796 | * SystemIO doesn't implement 64-bit |
797 | * registers. |
798 | */ |
799 | pr_debug("Invalid access width %d for SystemIO register in _CPC\n" , |
800 | gas_t->access_width); |
801 | goto out_free; |
802 | } |
803 | if (gas_t->address & OVER_16BTS_MASK) { |
804 | /* SystemIO registers use 16-bit integer addresses */ |
805 | pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n" , |
806 | gas_t->address); |
807 | goto out_free; |
808 | } |
809 | if (!osc_cpc_flexible_adr_space_confirmed) { |
810 | pr_debug("Flexible address space capability not supported\n" ); |
811 | if (!cpc_supported_by_cpu()) |
812 | goto out_free; |
813 | } |
814 | } else { |
815 | if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { |
816 | /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */ |
817 | pr_debug("Unsupported register type (%d) in _CPC\n" , |
818 | gas_t->space_id); |
819 | goto out_free; |
820 | } |
821 | } |
822 | |
823 | cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER; |
824 | memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t)); |
825 | } else { |
826 | pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n" , |
827 | i, pr->id); |
828 | goto out_free; |
829 | } |
830 | } |
831 | per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id; |
832 | |
833 | /* |
834 | * Initialize the remaining cpc_regs as unsupported. |
835 | * Example: In case FW exposes CPPC v2, the below loop will initialize |
836 | * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported |
837 | */ |
838 | for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) { |
839 | cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER; |
840 | cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0; |
841 | } |
842 | |
844 | /* Store CPU Logical ID */ |
845 | cpc_ptr->cpu_id = pr->id; |
846 | |
847 | /* Parse PSD data for this CPU */ |
848 | ret = acpi_get_psd(cpc_ptr, handle); |
849 | if (ret) |
850 | goto out_free; |
851 | |
852 | /* Register PCC channel once for all PCC subspace ID. */ |
853 | if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) { |
		ret = register_pcc_channel(pcc_subspace_id);
855 | if (ret) |
856 | goto out_free; |
857 | |
858 | init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock); |
859 | init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q); |
860 | } |
861 | |
862 | /* Everything looks okay */ |
863 | pr_debug("Parsed CPC struct for CPU: %d\n" , pr->id); |
864 | |
865 | /* Add per logical CPU nodes for reading its feedback counters. */ |
	cpu_dev = get_cpu_device(pr->id);
867 | if (!cpu_dev) { |
868 | ret = -EINVAL; |
869 | goto out_free; |
870 | } |
871 | |
872 | /* Plug PSD data into this CPU's CPC descriptor. */ |
873 | per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; |
874 | |
	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
877 | if (ret) { |
878 | per_cpu(cpc_desc_ptr, pr->id) = NULL; |
		kobject_put(&cpc_ptr->kobj);
880 | goto out_free; |
881 | } |
882 | |
883 | arch_init_invariance_cppc(); |
884 | |
	kfree(output.pointer);
886 | return 0; |
887 | |
888 | out_free: |
889 | /* Free all the mapped sys mem areas for this CPU */ |
890 | for (i = 2; i < cpc_ptr->num_entries; i++) { |
891 | void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; |
892 | |
893 | if (addr) |
894 | iounmap(addr); |
895 | } |
	kfree(cpc_ptr);
897 | |
898 | out_buf_free: |
	kfree(output.pointer);
900 | return ret; |
901 | } |
902 | EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe); |
903 | |
904 | /** |
905 | * acpi_cppc_processor_exit - Cleanup CPC structs. |
906 | * @pr: Ptr to acpi_processor containing this CPU's logical ID. |
907 | * |
908 | * Return: Void |
909 | */ |
910 | void acpi_cppc_processor_exit(struct acpi_processor *pr) |
911 | { |
912 | struct cpc_desc *cpc_ptr; |
913 | unsigned int i; |
914 | void __iomem *addr; |
915 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id); |
916 | |
917 | if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) { |
918 | if (pcc_data[pcc_ss_id]->pcc_channel_acquired) { |
919 | pcc_data[pcc_ss_id]->refcount--; |
920 | if (!pcc_data[pcc_ss_id]->refcount) { |
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
923 | pcc_data[pcc_ss_id] = NULL; |
924 | } |
925 | } |
926 | } |
927 | |
928 | cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); |
929 | if (!cpc_ptr) |
930 | return; |
931 | |
932 | /* Free all the mapped sys mem areas for this CPU */ |
933 | for (i = 2; i < cpc_ptr->num_entries; i++) { |
934 | addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; |
935 | if (addr) |
936 | iounmap(addr); |
937 | } |
938 | |
	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
941 | } |
942 | EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit); |
943 | |
944 | /** |
945 | * cpc_read_ffh() - Read FFH register |
946 | * @cpunum: CPU number to read |
947 | * @reg: cppc register information |
948 | * @val: place holder for return value |
949 | * |
950 | * Read bit_width bits from a specified address and bit_offset |
951 | * |
 * Return: 0 for success, negative error code otherwise.
953 | */ |
954 | int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) |
955 | { |
956 | return -ENOTSUPP; |
957 | } |
958 | |
959 | /** |
960 | * cpc_write_ffh() - Write FFH register |
961 | * @cpunum: CPU number to write |
962 | * @reg: cppc register information |
963 | * @val: value to write |
964 | * |
965 | * Write value of bit_width bits to a specified address and bit_offset |
966 | * |
 * Return: 0 for success, negative error code otherwise.
968 | */ |
969 | int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) |
970 | { |
971 | return -ENOTSUPP; |
972 | } |
973 | |
974 | /* |
975 | * Since cpc_read and cpc_write are called while holding pcc_lock, it should be |
976 | * as fast as possible. We have already mapped the PCC subspace during init, so |
977 | * we can directly write to it. |
978 | */ |
979 | |
980 | static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) |
981 | { |
982 | void __iomem *vaddr = NULL; |
983 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
984 | struct cpc_reg *reg = ®_res->cpc_entry.reg; |
985 | |
986 | if (reg_res->type == ACPI_TYPE_INTEGER) { |
987 | *val = reg_res->cpc_entry.int_value; |
988 | return 0; |
989 | } |
990 | |
991 | *val = 0; |
992 | |
993 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { |
994 | u32 width = 8 << (reg->access_width - 1); |
995 | u32 val_u32; |
996 | acpi_status status; |
997 | |
		status = acpi_os_read_port((acpi_io_address)reg->address,
					   &val_u32, width);
1000 | if (ACPI_FAILURE(status)) { |
1001 | pr_debug("Error: Failed to read SystemIO port %llx\n" , |
1002 | reg->address); |
1003 | return -EFAULT; |
1004 | } |
1005 | |
1006 | *val = val_u32; |
1007 | return 0; |
1008 | } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) |
1009 | vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); |
1010 | else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) |
1011 | vaddr = reg_res->sys_mem_vaddr; |
1012 | else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) |
		return cpc_read_ffh(cpu, reg, val);
1014 | else |
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);
1017 | |
1018 | switch (reg->bit_width) { |
1019 | case 8: |
1020 | *val = readb_relaxed(vaddr); |
1021 | break; |
1022 | case 16: |
1023 | *val = readw_relaxed(vaddr); |
1024 | break; |
1025 | case 32: |
1026 | *val = readl_relaxed(vaddr); |
1027 | break; |
1028 | case 64: |
1029 | *val = readq_relaxed(vaddr); |
1030 | break; |
1031 | default: |
1032 | pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n" , |
1033 | reg->bit_width, pcc_ss_id); |
1034 | return -EFAULT; |
1035 | } |
1036 | |
1037 | return 0; |
1038 | } |
1039 | |
1040 | static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) |
1041 | { |
1042 | int ret_val = 0; |
1043 | void __iomem *vaddr = NULL; |
1044 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
1045 | struct cpc_reg *reg = ®_res->cpc_entry.reg; |
1046 | |
1047 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { |
1048 | u32 width = 8 << (reg->access_width - 1); |
1049 | acpi_status status; |
1050 | |
		status = acpi_os_write_port((acpi_io_address)reg->address,
					    (u32)val, width);
1053 | if (ACPI_FAILURE(status)) { |
1054 | pr_debug("Error: Failed to write SystemIO port %llx\n" , |
1055 | reg->address); |
1056 | return -EFAULT; |
1057 | } |
1058 | |
1059 | return 0; |
1060 | } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) |
1061 | vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); |
1062 | else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) |
1063 | vaddr = reg_res->sys_mem_vaddr; |
1064 | else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) |
		return cpc_write_ffh(cpu, reg, val);
1066 | else |
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);
1069 | |
1070 | switch (reg->bit_width) { |
1071 | case 8: |
1072 | writeb_relaxed(val, vaddr); |
1073 | break; |
1074 | case 16: |
1075 | writew_relaxed(val, vaddr); |
1076 | break; |
1077 | case 32: |
1078 | writel_relaxed(val, vaddr); |
1079 | break; |
1080 | case 64: |
1081 | writeq_relaxed(val, vaddr); |
1082 | break; |
1083 | default: |
1084 | pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n" , |
1085 | reg->bit_width, pcc_ss_id); |
1086 | ret_val = -EFAULT; |
1087 | break; |
1088 | } |
1089 | |
1090 | return ret_val; |
1091 | } |
1092 | |
1093 | static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf) |
1094 | { |
1095 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); |
1096 | struct cpc_register_resource *reg; |
1097 | |
1098 | if (!cpc_desc) { |
1099 | pr_debug("No CPC descriptor for CPU:%d\n" , cpunum); |
1100 | return -ENODEV; |
1101 | } |
1102 | |
1103 | reg = &cpc_desc->cpc_regs[reg_idx]; |
1104 | |
1105 | if (CPC_IN_PCC(reg)) { |
1106 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); |
1107 | struct cppc_pcc_data *pcc_ss_data = NULL; |
1108 | int ret = 0; |
1109 | |
1110 | if (pcc_ss_id < 0) |
1111 | return -EIO; |
1112 | |
1113 | pcc_ss_data = pcc_data[pcc_ss_id]; |
1114 | |
		down_write(&pcc_ss_data->pcc_lock);
1116 | |
1117 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) |
			cpc_read(cpunum, reg, perf);
1119 | else |
1120 | ret = -EIO; |
1121 | |
		up_write(&pcc_ss_data->pcc_lock);
1123 | |
1124 | return ret; |
1125 | } |
1126 | |
	cpc_read(cpunum, reg, perf);
1128 | |
1129 | return 0; |
1130 | } |
1131 | |
1132 | /** |
1133 | * cppc_get_desired_perf - Get the desired performance register value. |
1134 | * @cpunum: CPU from which to get desired performance. |
1135 | * @desired_perf: Return address. |
1136 | * |
1137 | * Return: 0 for success, -EIO otherwise. |
1138 | */ |
1139 | int cppc_get_desired_perf(int cpunum, u64 *desired_perf) |
1140 | { |
	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
1142 | } |
1143 | EXPORT_SYMBOL_GPL(cppc_get_desired_perf); |
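
/*
 * Illustrative use only (hypothetical caller): a driver can sample the
 * currently requested performance level with:
 *
 *	u64 desired;
 *
 *	if (!cppc_get_desired_perf(cpu, &desired))
 *		pr_debug("CPU%d desired perf: %llu\n", cpu, desired);
 *
 * Note that if DESIRED_PERF is in a PCC region, the read takes the subspace
 * lock and rings the doorbell, so this is not suitable for hot paths.
 */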
1144 | |
1145 | /** |
1146 | * cppc_get_nominal_perf - Get the nominal performance register value. |
1147 | * @cpunum: CPU from which to get nominal performance. |
1148 | * @nominal_perf: Return address. |
1149 | * |
1150 | * Return: 0 for success, -EIO otherwise. |
1151 | */ |
1152 | int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf) |
1153 | { |
	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
1155 | } |
1156 | |
1157 | /** |
1158 | * cppc_get_epp_perf - Get the epp register value. |
1159 | * @cpunum: CPU from which to get epp preference value. |
1160 | * @epp_perf: Return address. |
1161 | * |
1162 | * Return: 0 for success, -EIO otherwise. |
1163 | */ |
1164 | int cppc_get_epp_perf(int cpunum, u64 *epp_perf) |
1165 | { |
	return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
1167 | } |
1168 | EXPORT_SYMBOL_GPL(cppc_get_epp_perf); |
1169 | |
1170 | /** |
1171 | * cppc_get_perf_caps - Get a CPU's performance capabilities. |
1172 | * @cpunum: CPU from which to get capabilities info. |
1173 | * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h |
1174 | * |
1175 | * Return: 0 for success with perf_caps populated else -ERRNO. |
1176 | */ |
1177 | int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) |
1178 | { |
1179 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); |
1180 | struct cpc_register_resource *highest_reg, *lowest_reg, |
1181 | *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg, |
1182 | *low_freq_reg = NULL, *nom_freq_reg = NULL; |
1183 | u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0; |
1184 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); |
1185 | struct cppc_pcc_data *pcc_ss_data = NULL; |
1186 | int ret = 0, regs_in_pcc = 0; |
1187 | |
1188 | if (!cpc_desc) { |
1189 | pr_debug("No CPC descriptor for CPU:%d\n" , cpunum); |
1190 | return -ENODEV; |
1191 | } |
1192 | |
1193 | highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF]; |
1194 | lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF]; |
1195 | lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF]; |
1196 | nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; |
1197 | low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ]; |
1198 | nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ]; |
1199 | guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF]; |
1200 | |
	/* Are any of the regs PCC? */
1202 | if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || |
1203 | CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) || |
1204 | CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) { |
1205 | if (pcc_ss_id < 0) { |
1206 | pr_debug("Invalid pcc_ss_id\n" ); |
1207 | return -ENODEV; |
1208 | } |
1209 | pcc_ss_data = pcc_data[pcc_ss_id]; |
1210 | regs_in_pcc = 1; |
		down_write(&pcc_ss_data->pcc_lock);
1212 | /* Ring doorbell once to update PCC subspace */ |
1213 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) { |
1214 | ret = -EIO; |
1215 | goto out_err; |
1216 | } |
1217 | } |
1218 | |
	cpc_read(cpunum, highest_reg, &high);
1220 | perf_caps->highest_perf = high; |
1221 | |
	cpc_read(cpunum, lowest_reg, &low);
1223 | perf_caps->lowest_perf = low; |
1224 | |
	cpc_read(cpunum, nominal_reg, &nom);
1226 | perf_caps->nominal_perf = nom; |
1227 | |
1228 | if (guaranteed_reg->type != ACPI_TYPE_BUFFER || |
1229 | IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) { |
1230 | perf_caps->guaranteed_perf = 0; |
1231 | } else { |
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
1233 | perf_caps->guaranteed_perf = guaranteed; |
1234 | } |
1235 | |
	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1237 | perf_caps->lowest_nonlinear_perf = min_nonlinear; |
1238 | |
1239 | if (!high || !low || !nom || !min_nonlinear) |
1240 | ret = -EFAULT; |
1241 | |
1242 | /* Read optional lowest and nominal frequencies if present */ |
1243 | if (CPC_SUPPORTED(low_freq_reg)) |
		cpc_read(cpunum, low_freq_reg, &low_f);
1245 | |
1246 | if (CPC_SUPPORTED(nom_freq_reg)) |
		cpc_read(cpunum, nom_freq_reg, &nom_f);
1248 | |
1249 | perf_caps->lowest_freq = low_f; |
1250 | perf_caps->nominal_freq = nom_f; |
1251 | |
1253 | out_err: |
1254 | if (regs_in_pcc) |
		up_write(&pcc_ss_data->pcc_lock);
1256 | return ret; |
1257 | } |
1258 | EXPORT_SYMBOL_GPL(cppc_get_perf_caps); |
1259 | |
1260 | /** |
1261 | * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region. |
1262 | * |
1263 | * CPPC has flexibility about how CPU performance counters are accessed. |
1264 | * One of the choices is PCC regions, which can have a high access latency. This |
1265 | * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time. |
1266 | * |
1267 | * Return: true if any of the counters are in PCC regions, false otherwise |
1268 | */ |
1269 | bool cppc_perf_ctrs_in_pcc(void) |
1270 | { |
1271 | int cpu; |
1272 | |
1273 | for_each_present_cpu(cpu) { |
1274 | struct cpc_register_resource *ref_perf_reg; |
1275 | struct cpc_desc *cpc_desc; |
1276 | |
1277 | cpc_desc = per_cpu(cpc_desc_ptr, cpu); |
1278 | |
1279 | if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) || |
1280 | CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) || |
1281 | CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME])) |
1282 | return true; |
1283 | |
1285 | ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; |
1286 | |
1287 | /* |
1288 | * If reference perf register is not supported then we should |
1289 | * use the nominal perf value |
1290 | */ |
1291 | if (!CPC_SUPPORTED(ref_perf_reg)) |
1292 | ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; |
1293 | |
1294 | if (CPC_IN_PCC(ref_perf_reg)) |
1295 | return true; |
1296 | } |
1297 | |
1298 | return false; |
1299 | } |
1300 | EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc); |
1301 | |
1302 | /** |
1303 | * cppc_get_perf_ctrs - Read a CPU's performance feedback counters. |
1304 | * @cpunum: CPU from which to read counters. |
1305 | * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h |
1306 | * |
1307 | * Return: 0 for success with perf_fb_ctrs populated else -ERRNO. |
1308 | */ |
1309 | int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) |
1310 | { |
1311 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); |
1312 | struct cpc_register_resource *delivered_reg, *reference_reg, |
1313 | *ref_perf_reg, *ctr_wrap_reg; |
1314 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); |
1315 | struct cppc_pcc_data *pcc_ss_data = NULL; |
1316 | u64 delivered, reference, ref_perf, ctr_wrap_time; |
1317 | int ret = 0, regs_in_pcc = 0; |
1318 | |
1319 | if (!cpc_desc) { |
1320 | pr_debug("No CPC descriptor for CPU:%d\n" , cpunum); |
1321 | return -ENODEV; |
1322 | } |
1323 | |
1324 | delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; |
1325 | reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; |
1326 | ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; |
1327 | ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME]; |
1328 | |
1329 | /* |
1330 | * If reference perf register is not supported then we should |
1331 | * use the nominal perf value |
1332 | */ |
1333 | if (!CPC_SUPPORTED(ref_perf_reg)) |
1334 | ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; |
1335 | |
	/* Are any of the regs PCC? */
1337 | if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) || |
1338 | CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) { |
1339 | if (pcc_ss_id < 0) { |
1340 | pr_debug("Invalid pcc_ss_id\n" ); |
1341 | return -ENODEV; |
1342 | } |
1343 | pcc_ss_data = pcc_data[pcc_ss_id]; |
		down_write(&pcc_ss_data->pcc_lock);
1345 | regs_in_pcc = 1; |
1346 | /* Ring doorbell once to update PCC subspace */ |
1347 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) { |
1348 | ret = -EIO; |
1349 | goto out_err; |
1350 | } |
1351 | } |
1352 | |
	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);
1356 | |
1357 | /* |
	 * Per spec, if the ctr_wrap_time optional register is unsupported, then
	 * the performance counters are assumed to never wrap during the
	 * lifetime of the platform.
1361 | */ |
1362 | ctr_wrap_time = (u64)(~((u64)0)); |
1363 | if (CPC_SUPPORTED(ctr_wrap_reg)) |
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1365 | |
1366 | if (!delivered || !reference || !ref_perf) { |
1367 | ret = -EFAULT; |
1368 | goto out_err; |
1369 | } |
1370 | |
1371 | perf_fb_ctrs->delivered = delivered; |
1372 | perf_fb_ctrs->reference = reference; |
1373 | perf_fb_ctrs->reference_perf = ref_perf; |
1374 | perf_fb_ctrs->wraparound_time = ctr_wrap_time; |
1375 | out_err: |
1376 | if (regs_in_pcc) |
		up_write(&pcc_ss_data->pcc_lock);
1378 | return ret; |
1379 | } |
1380 | EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); |
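
/*
 * A common way to consume the feedback counters (a sketch of what CPUfreq
 * drivers typically do; variable names are hypothetical): sample the counters
 * twice and derive the average delivered performance over the interval, i.e.
 * delivered_perf = reference_perf * delta(delivered) / delta(reference):
 *
 *	struct cppc_perf_fb_ctrs fb_t0 = {0}, fb_t1 = {0};
 *	u64 d_delivered, d_reference, delivered_perf;
 *
 *	cppc_get_perf_ctrs(cpu, &fb_t0);
 *	udelay(2);	// let the counters advance
 *	cppc_get_perf_ctrs(cpu, &fb_t1);
 *
 *	d_delivered = fb_t1.delivered - fb_t0.delivered;
 *	d_reference = fb_t1.reference - fb_t0.reference;
 *	if (d_reference)
 *		delivered_perf = (fb_t0.reference_perf * d_delivered) /
 *				  d_reference;
 */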
1381 | |
1382 | /* |
1383 | * Set Energy Performance Preference Register value through |
1384 | * Performance Controls Interface |
1385 | */ |
1386 | int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable) |
1387 | { |
1388 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
1389 | struct cpc_register_resource *epp_set_reg; |
1390 | struct cpc_register_resource *auto_sel_reg; |
1391 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); |
1392 | struct cppc_pcc_data *pcc_ss_data = NULL; |
1393 | int ret; |
1394 | |
1395 | if (!cpc_desc) { |
1396 | pr_debug("No CPC descriptor for CPU:%d\n" , cpu); |
1397 | return -ENODEV; |
1398 | } |
1399 | |
1400 | auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; |
1401 | epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF]; |
1402 | |
1403 | if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) { |
1404 | if (pcc_ss_id < 0) { |
1405 | pr_debug("Invalid pcc_ss_id for CPU:%d\n" , cpu); |
1406 | return -ENODEV; |
1407 | } |
1408 | |
1409 | if (CPC_SUPPORTED(auto_sel_reg)) { |
			ret = cpc_write(cpu, auto_sel_reg, enable);
1411 | if (ret) |
1412 | return ret; |
1413 | } |
1414 | |
1415 | if (CPC_SUPPORTED(epp_set_reg)) { |
			ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
1417 | if (ret) |
1418 | return ret; |
1419 | } |
1420 | |
1421 | pcc_ss_data = pcc_data[pcc_ss_id]; |
1422 | |
		down_write(&pcc_ss_data->pcc_lock);
1424 | /* after writing CPC, transfer the ownership of PCC to platform */ |
1425 | ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); |
		up_write(&pcc_ss_data->pcc_lock);
1427 | } else { |
1428 | ret = -ENOTSUPP; |
1429 | pr_debug("_CPC in PCC is not supported\n" ); |
1430 | } |
1431 | |
1432 | return ret; |
1433 | } |
1434 | EXPORT_SYMBOL_GPL(cppc_set_epp_perf); |
1435 | |
1436 | /** |
1437 | * cppc_get_auto_sel_caps - Read autonomous selection register. |
 * @cpunum: CPU from which to read register.
 * @perf_caps: struct where autonomous selection register value is updated.
1440 | */ |
1441 | int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps) |
1442 | { |
1443 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); |
1444 | struct cpc_register_resource *auto_sel_reg; |
1445 | u64 auto_sel; |
1446 | |
1447 | if (!cpc_desc) { |
1448 | pr_debug("No CPC descriptor for CPU:%d\n" , cpunum); |
1449 | return -ENODEV; |
1450 | } |
1451 | |
1452 | auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; |
1453 | |
1454 | if (!CPC_SUPPORTED(auto_sel_reg)) |
1455 | pr_warn_once("Autonomous mode is not unsupported!\n" ); |
1456 | |
1457 | if (CPC_IN_PCC(auto_sel_reg)) { |
1458 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); |
1459 | struct cppc_pcc_data *pcc_ss_data = NULL; |
1460 | int ret = 0; |
1461 | |
1462 | if (pcc_ss_id < 0) |
1463 | return -ENODEV; |
1464 | |
1465 | pcc_ss_data = pcc_data[pcc_ss_id]; |
1466 | |
		down_write(&pcc_ss_data->pcc_lock);
1468 | |
1469 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) { |
			cpc_read(cpunum, auto_sel_reg, &auto_sel);
1471 | perf_caps->auto_sel = (bool)auto_sel; |
1472 | } else { |
1473 | ret = -EIO; |
1474 | } |
1475 | |
		up_write(&pcc_ss_data->pcc_lock);
1477 | |
1478 | return ret; |
1479 | } |
1480 | |
1481 | return 0; |
1482 | } |
1483 | EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps); |
1484 | |
1485 | /** |
1486 | * cppc_set_auto_sel - Write autonomous selection register. |
 * @cpu: CPU to which to write register.
 * @enable: the desired value of the autonomous selection register to be updated.
1489 | */ |
1490 | int cppc_set_auto_sel(int cpu, bool enable) |
1491 | { |
1492 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
1493 | struct cpc_register_resource *auto_sel_reg; |
1494 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); |
1495 | struct cppc_pcc_data *pcc_ss_data = NULL; |
1496 | int ret = -EINVAL; |
1497 | |
1498 | if (!cpc_desc) { |
1499 | pr_debug("No CPC descriptor for CPU:%d\n" , cpu); |
1500 | return -ENODEV; |
1501 | } |
1502 | |
1503 | auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; |
1504 | |
1505 | if (CPC_IN_PCC(auto_sel_reg)) { |
1506 | if (pcc_ss_id < 0) { |
1507 | pr_debug("Invalid pcc_ss_id\n" ); |
1508 | return -ENODEV; |
1509 | } |
1510 | |
1511 | if (CPC_SUPPORTED(auto_sel_reg)) { |
			ret = cpc_write(cpu, auto_sel_reg, enable);
1513 | if (ret) |
1514 | return ret; |
1515 | } |
1516 | |
1517 | pcc_ss_data = pcc_data[pcc_ss_id]; |
1518 | |
		down_write(&pcc_ss_data->pcc_lock);
1520 | /* after writing CPC, transfer the ownership of PCC to platform */ |
1521 | ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); |
		up_write(&pcc_ss_data->pcc_lock);
1523 | } else { |
1524 | ret = -ENOTSUPP; |
1525 | pr_debug("_CPC in PCC is not supported\n" ); |
1526 | } |
1527 | |
1528 | return ret; |
1529 | } |
1530 | EXPORT_SYMBOL_GPL(cppc_set_auto_sel); |
1531 | |
1532 | /** |
 * cppc_set_enable - Enable or disable CPPC on the processor by writing the
 * Continuous Performance Control package EnableRegister field.
1535 | * @cpu: CPU for which to enable CPPC register. |
1536 | * @enable: 0 - disable, 1 - enable CPPC feature on the processor. |
1537 | * |
1538 | * Return: 0 for success, -ERRNO or -EIO otherwise. |
1539 | */ |
1540 | int cppc_set_enable(int cpu, bool enable) |
1541 | { |
1542 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
1543 | struct cpc_register_resource *enable_reg; |
1544 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); |
1545 | struct cppc_pcc_data *pcc_ss_data = NULL; |
1546 | int ret = -EINVAL; |
1547 | |
1548 | if (!cpc_desc) { |
1549 | pr_debug("No CPC descriptor for CPU:%d\n" , cpu); |
1550 | return -EINVAL; |
1551 | } |
1552 | |
1553 | enable_reg = &cpc_desc->cpc_regs[ENABLE]; |
1554 | |
1555 | if (CPC_IN_PCC(enable_reg)) { |
1557 | if (pcc_ss_id < 0) |
1558 | return -EIO; |
1559 | |
		ret = cpc_write(cpu, enable_reg, enable);
1561 | if (ret) |
1562 | return ret; |
1563 | |
1564 | pcc_ss_data = pcc_data[pcc_ss_id]; |
1565 | |
		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
1568 | ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); |
		up_write(&pcc_ss_data->pcc_lock);
1570 | return ret; |
1571 | } |
1572 | |
	return cpc_write(cpu, enable_reg, enable);
1574 | } |
1575 | EXPORT_SYMBOL_GPL(cppc_set_enable); |
1576 | |
1577 | /** |
1578 | * cppc_set_perf - Set a CPU's performance controls. |
1579 | * @cpu: CPU for which to set performance controls. |
1580 | * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h |
1581 | * |
1582 | * Return: 0 for success, -ERRNO otherwise. |
1583 | */ |
1584 | int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) |
1585 | { |
1586 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); |
1587 | struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg; |
1588 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); |
1589 | struct cppc_pcc_data *pcc_ss_data = NULL; |
1590 | int ret = 0; |
1591 | |
1592 | if (!cpc_desc) { |
1593 | pr_debug("No CPC descriptor for CPU:%d\n" , cpu); |
1594 | return -ENODEV; |
1595 | } |
1596 | |
1597 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; |
1598 | min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF]; |
1599 | max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF]; |
1600 | |
1601 | /* |
1602 | * This is Phase-I where we want to write to CPC registers |
1603 | * -> We want all CPUs to be able to execute this phase in parallel |
1604 | * |
1605 | * Since read_lock can be acquired by multiple CPUs simultaneously we |
1606 | * achieve that goal here |
1607 | */ |
1608 | if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { |
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
1622 | /* |
1623 | * Update the pending_write to make sure a PCC CMD_READ will not |
1624 | * arrive and steal the channel during the switch to write lock |
1625 | */ |
1626 | pcc_ss_data->pending_pcc_write_cmd = true; |
1627 | cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt; |
1628 | cpc_desc->write_cmd_status = 0; |
1629 | } |
1630 | |
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1632 | |
1633 | /* |
1634 | * Only write if min_perf and max_perf not zero. Some drivers pass zero |
1635 | * value to min and max perf, but they don't mean to set the zero value, |
1636 | * they just don't want to write to those registers. |
1637 | */ |
1638 | if (perf_ctrls->min_perf) |
1639 | cpc_write(cpu, reg_res: min_perf_reg, val: perf_ctrls->min_perf); |
1640 | if (perf_ctrls->max_perf) |
1641 | cpc_write(cpu, reg_res: max_perf_reg, val: perf_ctrls->max_perf); |
1642 | |
1643 | if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) |
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
1645 | /* |
1646 | * This is Phase-II where we transfer the ownership of PCC to Platform |
1647 | * |
1648 | * Short Summary: Basically if we think of a group of cppc_set_perf |
1649 | * requests that happened in short overlapping interval. The last CPU to |
1650 | * come out of Phase-I will enter Phase-II and ring the doorbell. |
1651 | * |
1652 | * We have the following requirements for Phase-II: |
1653 | * 1. We want to execute Phase-II only when there are no CPUs |
1654 | * currently executing in Phase-I |
1655 | * 2. Once we start Phase-II we want to avoid all other CPUs from |
1656 | * entering Phase-I. |
1657 | * 3. We want only one CPU among all those who went through Phase-I |
1658 | * to run phase-II |
1659 | * |
1660 | * If write_trylock fails to get the lock and doesn't transfer the |
1661 | * PCC ownership to the platform, then one of the following will be TRUE |
1662 | * 1. There is at-least one CPU in Phase-I which will later execute |
1663 | * write_trylock, so the CPUs in Phase-I will be responsible for |
1664 | * executing the Phase-II. |
1665 | * 2. Some other CPU has beaten this CPU to successfully execute the |
1666 | * write_trylock and has already acquired the write_lock. We know for a |
1667 | * fact it (other CPU acquiring the write_lock) couldn't have happened |
1668 | * before this CPU's Phase-I as we held the read_lock. |
1669 | * 3. Some other CPU executing pcc CMD_READ has stolen the |
1670 | * down_write, in which case, send_pcc_cmd will check for pending |
1671 | * CMD_WRITE commands by checking the pending_pcc_write_cmd. |
1672 | * So this CPU can be certain that its request will be delivered |
1673 | * So in all cases, this CPU knows that its request will be delivered |
1674 | * by another CPU and can return |
1675 | * |
1676 | * After getting the down_write we still need to check for |
1677 | * pending_pcc_write_cmd to take care of the following scenario |
1678 | * The thread running this code could be scheduled out between |
1679 | * Phase-I and Phase-II. Before it is scheduled back on, another CPU |
1680 | * could have delivered the request to Platform by triggering the |
1681 | * doorbell and transferred the ownership of PCC to platform. So this |
1682 | * avoids triggering an unnecessary doorbell and more importantly before |
1683 | * triggering the doorbell it makes sure that the PCC channel ownership |
1684 | * is still with OSPM. |
1685 | * pending_pcc_write_cmd can also be cleared by a different CPU, if |
1686 | * there was a pcc CMD_READ waiting on down_write and it steals the lock |
1687 | * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this |
1688 | * case during a CMD_READ and if there are pending writes it delivers |
1689 | * the write command before servicing the read command |
1690 | */ |
1691 | if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { |
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) { /* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else {
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
		}
1701 | |
1702 | /* send_pcc_cmd updates the status in case of failure */ |
1703 | ret = cpc_desc->write_cmd_status; |
1704 | } |
1705 | return ret; |
1706 | } |
1707 | EXPORT_SYMBOL_GPL(cppc_set_perf); |
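
/*
 * Illustrative usage sketch (hypothetical values): a cpufreq ->target()
 * callback would fill in struct cppc_perf_ctrls (see cppc_acpi.h) and call
 * cppc_set_perf(). Leaving min_perf/max_perf at zero skips writing those
 * registers, as noted above:
 *
 *	struct cppc_perf_ctrls ctrls = { 0 };
 *
 *	ctrls.desired_perf = desired;
 *	ret = cppc_set_perf(cpu, &ctrls);
 */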
1708 | |
1709 | /** |
1710 | * cppc_get_transition_latency - returns frequency transition latency in ns |
1711 | * @cpu_num: CPU number for per_cpu(). |
1712 | * |
 * ACPI CPPC does not explicitly specify how a platform can communicate the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables, which provides the number
 * and frequency of PCC commands the platform can handle.
1717 | * |
1718 | * If desired_reg is in the SystemMemory or SystemIo ACPI address space, |
1719 | * then assume there is no latency. |
1720 | */ |
1721 | unsigned int cppc_get_transition_latency(int cpu_num) |
1722 | { |
1723 | /* |
1724 | * Expected transition latency is based on the PCCT timing values |
1725 | * Below are definition from ACPI spec: |
1726 | * pcc_nominal- Expected latency to process a command, in microseconds |
1727 | * pcc_mpar - The maximum number of periodic requests that the subspace |
1728 | * channel can support, reported in commands per minute. 0 |
1729 | * indicates no limitation. |
1730 | * pcc_mrtt - The minimum amount of time that OSPM must wait after the |
1731 | * completion of a command before issuing the next command, |
1732 | * in microseconds. |
1733 | */ |
1734 | unsigned int latency_ns = 0; |
1735 | struct cpc_desc *cpc_desc; |
1736 | struct cpc_register_resource *desired_reg; |
1737 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num); |
1738 | struct cppc_pcc_data *pcc_ss_data; |
1739 | |
1740 | cpc_desc = per_cpu(cpc_desc_ptr, cpu_num); |
1741 | if (!cpc_desc) |
1742 | return CPUFREQ_ETERNAL; |
1743 | |
1744 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; |
1745 | if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg)) |
1746 | return 0; |
1747 | else if (!CPC_IN_PCC(desired_reg)) |
1748 | return CPUFREQ_ETERNAL; |
1749 | |
1750 | if (pcc_ss_id < 0) |
1751 | return CPUFREQ_ETERNAL; |
1752 | |
1753 | pcc_ss_data = pcc_data[pcc_ss_id]; |
1754 | if (pcc_ss_data->pcc_mpar) |
1755 | latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar); |
1756 | |
1757 | latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000); |
1758 | latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000); |
1759 | |
1760 | return latency_ns; |
1761 | } |
1762 | EXPORT_SYMBOL_GPL(cppc_get_transition_latency); |
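
/*
 * Illustrative usage sketch (hypothetical driver code): cpufreq drivers
 * typically feed this value straight into the policy during ->init():
 *
 *	policy->cpuinfo.transition_latency =
 *		cppc_get_transition_latency(policy->cpu);
 */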
1763 | |