// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"
#define RPMH_TIMEOUT_MS			msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = device,				\
		.needs_free = false,		\
	}
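
/*
 * Illustrative use of DEFINE_RPMH_MSG_ONSTACK (a minimal sketch; the
 * address 0x30000 is a made-up example, not a real RPMH resource):
 *
 *	DECLARE_COMPLETION_ONSTACK(compl);
 *	DEFINE_RPMH_MSG_ONSTACK(dev, RPMH_ACTIVE_ONLY_STATE, &compl, rpm_msg);
 *
 *	rpm_msg.cmd[0].addr = 0x30000;
 *	rpm_msg.cmd[0].data = 0x1;
 *	rpm_msg.msg.num_cmds = 1;
 */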

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	if (!compl)
		goto exit;

	/* Signal the blocking thread we are done */
	complete(compl);

exit:
	if (free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;
	u32 old_sleep_val, old_wake_val;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	old_sleep_val = req->sleep_val;
	old_wake_val = req->wake_val;

	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	}

	ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
			 req->wake_val != old_wake_val) &&
			 req->sleep_val != UINT_MAX &&
			 req->wake_val != UINT_MAX;

unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		WARN_ON(irqs_disabled());
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		/* Clean up our call by spoofing tx_done */
		ret = 0;
		rpmh_tx_done(&rpm_msg->msg);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
			   const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands; the order of commands is maintained
 * and they will be sent as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL_GPL(rpmh_write_async);
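
/*
 * Example (illustrative only; the tcs_cmd address and data are hypothetical
 * values for a notional shared resource, and @dev is assumed to be a child
 * of the RSC controller):
 *
 *	struct tcs_cmd cmd = {
 *		.addr = 0x30000,	(hypothetical resource address)
 *		.data = 0x1,
 *	};
 *
 *	ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *
 * The request buffer is freed from rpmh_tx_done() once the controller
 * acknowledges it, so the caller does not wait here.
 */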

/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
	if (ret)
		return ret;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rpmh_write);
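
/*
 * Example (a minimal sketch with a hypothetical address/data pair):
 *
 *	struct tcs_cmd cmd = { .addr = 0x30000, .data = 0x1 };
 *	int ret;
 *
 *	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *	if (ret)
 *		dev_err(dev, "rpmh_write failed: %d\n", ret);
 *
 * The call blocks on an on-stack completion signalled by rpmh_tx_done(),
 * bounded by RPMH_TIMEOUT_MS.
 */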

static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}

	return ret;
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-n-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL_GPL(rpmh_write_batch);
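
/*
 * Example (illustrative only; addresses and data are hypothetical). Two
 * batches are described by @n: the first two commands form batch one, the
 * third forms batch two, and the trailing 0 terminates the array:
 *
 *	struct tcs_cmd cmds[] = {
 *		{ .addr = 0x30000, .data = 0x1 },
 *		{ .addr = 0x30010, .data = 0x2 },
 *		{ .addr = 0x30020, .data = 0x3 },
 *	};
 *	u32 n[] = { 2, 1, 0 };
 *
 *	ret = rpmh_write_batch(dev, RPMH_SLEEP_STATE, cmds, n);
 *
 * With RPMH_SLEEP_STATE the batch is only cached here and written to the
 * controller later by rpmh_flush().
 */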

static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
 *
 * @ctrlr: Controller making request to flush cached data
 *
 * Return:
 * * 0          - Success
 * * Error code - Otherwise
 */
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
	struct cache_req *p;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	/*
	 * Currently rpmh_flush() is only called when we think we're running
	 * on the last processor. If the lock is busy it means another
	 * processor is up and it's better to abort than spin.
	 */
	if (!spin_trylock(&ctrlr->cache_lock))
		return -EBUSY;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		goto write_next_wakeup;
	}

	/* Invalidate the TCSes first to avoid stale data */
	rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		goto exit;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x\n",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
				  p->sleep_val);
		if (ret)
			goto exit;
		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
				  p->wake_val);
		if (ret)
			goto exit;
	}

	ctrlr->dirty = false;

write_next_wakeup:
	rpmh_rsc_write_next_wakeup(ctrlr_to_drv(ctrlr));
exit:
	spin_unlock(&ctrlr->cache_lock);
	return ret;
}

/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
void rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
EXPORT_SYMBOL_GPL(rpmh_invalidate);
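
/*
 * Typical usage (a sketch; sleep_cmds/wake_cmds and their count arrays are
 * hypothetical): drop stale cached batches before writing a new sleep and
 * wake set, so only the fresh votes are flushed later.
 *
 *	rpmh_invalidate(dev);
 *	rpmh_write_batch(dev, RPMH_SLEEP_STATE, sleep_cmds, sleep_ns);
 *	rpmh_write_batch(dev, RPMH_WAKE_ONLY_STATE, wake_cmds, wake_ns);
 */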
504 | |