1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Xilinx Event Management Driver |
4 | * |
5 | * Copyright (C) 2021 Xilinx, Inc. |
6 | * |
7 | * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com> |
8 | */ |
9 | |
10 | #include <linux/cpuhotplug.h> |
11 | #include <linux/firmware/xlnx-event-manager.h> |
12 | #include <linux/firmware/xlnx-zynqmp.h> |
13 | #include <linux/hashtable.h> |
14 | #include <linux/interrupt.h> |
15 | #include <linux/irq.h> |
16 | #include <linux/irqdomain.h> |
17 | #include <linux/module.h> |
18 | #include <linux/of_irq.h> |
19 | #include <linux/platform_device.h> |
20 | #include <linux/slab.h> |
21 | |
/* Per-CPU cookie passed as the percpu dev_id to request_percpu_irq() */
static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);

/* Linux virq backing the SGI used for firmware event notification */
static int virq_sgi;
/* 0 once probe succeeds; -EACCES while the event manager is unavailable */
static int event_manager_availability = -EACCES;

/* SGI number used for Event management driver */
#define XLNX_EVENT_SGI_NUM	(15)

/* Max number of driver can register for same event */
#define MAX_DRIVER_PER_EVENT	(10U)

/* Max HashMap Order for PM API feature check (1<<7 = 128) */
#define REGISTERED_DRIVER_MAX_ORDER	(7)

#define MAX_BITS	(32U) /* Number of bits available for error mask */

#define FIRMWARE_VERSION_MASK			(0xFFFFU)
#define REGISTER_NOTIFIER_FIRMWARE_VERSION	(2U)

/* Hashtable keyed by (Node-Id << 32 | Event) or by the callback type */
static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
/* SGI to use; overridable at load time via the "sgi_num" module parameter */
static int sgi_num = XLNX_EVENT_SGI_NUM;

/* Set by the remove helpers when a hash entry was freed, telling the
 * unregister path that the firmware notifier should be dropped too.
 */
static bool is_need_to_unregister;
45 | |
/**
 * struct agent_cb - Registered callback function and private data.
 * @agent_data:	Data passed back to handler function.
 * @eve_cb:	Function pointer to store the callback function.
 * @list:	member to create list.
 */
struct agent_cb {
	void *agent_data;	/* opaque per-agent cookie, not dereferenced here */
	event_cb_func_t eve_cb;	/* invoked with the raw firmware payload */
	struct list_head list;	/* links into registered_event_data.cb_list_head */
};
57 | |
/**
 * struct registered_event_data - Registered Event Data.
 * @key:	key is the combine id(Node-Id | Event-Id) of type u64
 *		where upper u32 for Node-Id and lower u32 for Event-Id,
 *		And this used as key to index into hashmap.
 * @cb_type:	Type of Api callback, like PM_NOTIFY_CB, etc.
 * @wake:	If this flag set, firmware will wake up processor if is
 *		in sleep or power down state.
 * @cb_list_head: Head of call back data list which contain the information
 *		about registered handler and private data.
 * @hentry:	hlist_node that hooks this entry into hashtable.
 */
struct registered_event_data {
	u64 key;			/* 0 for PM_INIT_SUSPEND_CB entries */
	enum pm_api_cb_id cb_type;
	bool wake;
	struct list_head cb_list_head;	/* list of struct agent_cb */
	struct hlist_node hentry;
};
77 | |
78 | static bool xlnx_is_error_event(const u32 node_id) |
79 | { |
80 | u32 pm_family_code, pm_sub_family_code; |
81 | |
82 | zynqmp_pm_get_family_info(family: &pm_family_code, subfamily: &pm_sub_family_code); |
83 | |
84 | if (pm_sub_family_code == VERSAL_SUB_FAMILY_CODE) { |
85 | if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 || |
86 | node_id == VERSAL_EVENT_ERROR_PMC_ERR2 || |
87 | node_id == VERSAL_EVENT_ERROR_PSM_ERR1 || |
88 | node_id == VERSAL_EVENT_ERROR_PSM_ERR2) |
89 | return true; |
90 | } else { |
91 | if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 || |
92 | node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 || |
93 | node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 || |
94 | node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR1 || |
95 | node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR2 || |
96 | node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR3 || |
97 | node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR4) |
98 | return true; |
99 | } |
100 | |
101 | return false; |
102 | } |
103 | |
104 | static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake, |
105 | event_cb_func_t cb_fun, void *data) |
106 | { |
107 | u64 key = 0; |
108 | bool present_in_hash = false; |
109 | struct registered_event_data *eve_data; |
110 | struct agent_cb *cb_data; |
111 | struct agent_cb *cb_pos; |
112 | struct agent_cb *cb_next; |
113 | |
114 | key = ((u64)node_id << 32U) | (u64)event; |
115 | /* Check for existing entry in hash table for given key id */ |
116 | hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { |
117 | if (eve_data->key == key) { |
118 | present_in_hash = true; |
119 | break; |
120 | } |
121 | } |
122 | |
123 | if (!present_in_hash) { |
124 | /* Add new entry if not present in HASH table */ |
125 | eve_data = kmalloc(size: sizeof(*eve_data), GFP_KERNEL); |
126 | if (!eve_data) |
127 | return -ENOMEM; |
128 | eve_data->key = key; |
129 | eve_data->cb_type = PM_NOTIFY_CB; |
130 | eve_data->wake = wake; |
131 | INIT_LIST_HEAD(list: &eve_data->cb_list_head); |
132 | |
133 | cb_data = kmalloc(size: sizeof(*cb_data), GFP_KERNEL); |
134 | if (!cb_data) { |
135 | kfree(objp: eve_data); |
136 | return -ENOMEM; |
137 | } |
138 | cb_data->eve_cb = cb_fun; |
139 | cb_data->agent_data = data; |
140 | |
141 | /* Add into callback list */ |
142 | list_add(new: &cb_data->list, head: &eve_data->cb_list_head); |
143 | |
144 | /* Add into HASH table */ |
145 | hash_add(reg_driver_map, &eve_data->hentry, key); |
146 | } else { |
147 | /* Search for callback function and private data in list */ |
148 | list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { |
149 | if (cb_pos->eve_cb == cb_fun && |
150 | cb_pos->agent_data == data) { |
151 | return 0; |
152 | } |
153 | } |
154 | |
155 | /* Add multiple handler and private data in list */ |
156 | cb_data = kmalloc(size: sizeof(*cb_data), GFP_KERNEL); |
157 | if (!cb_data) |
158 | return -ENOMEM; |
159 | cb_data->eve_cb = cb_fun; |
160 | cb_data->agent_data = data; |
161 | |
162 | list_add(new: &cb_data->list, head: &eve_data->cb_list_head); |
163 | } |
164 | |
165 | return 0; |
166 | } |
167 | |
168 | static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data) |
169 | { |
170 | struct registered_event_data *eve_data; |
171 | struct agent_cb *cb_data; |
172 | |
173 | /* Check for existing entry in hash table for given cb_type */ |
174 | hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) { |
175 | if (eve_data->cb_type == PM_INIT_SUSPEND_CB) { |
176 | pr_err("Found as already registered\n" ); |
177 | return -EINVAL; |
178 | } |
179 | } |
180 | |
181 | /* Add new entry if not present */ |
182 | eve_data = kmalloc(size: sizeof(*eve_data), GFP_KERNEL); |
183 | if (!eve_data) |
184 | return -ENOMEM; |
185 | |
186 | eve_data->key = 0; |
187 | eve_data->cb_type = PM_INIT_SUSPEND_CB; |
188 | INIT_LIST_HEAD(list: &eve_data->cb_list_head); |
189 | |
190 | cb_data = kmalloc(size: sizeof(*cb_data), GFP_KERNEL); |
191 | if (!cb_data) |
192 | return -ENOMEM; |
193 | cb_data->eve_cb = cb_fun; |
194 | cb_data->agent_data = data; |
195 | |
196 | /* Add into callback list */ |
197 | list_add(new: &cb_data->list, head: &eve_data->cb_list_head); |
198 | |
199 | hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB); |
200 | |
201 | return 0; |
202 | } |
203 | |
204 | static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun) |
205 | { |
206 | bool is_callback_found = false; |
207 | struct registered_event_data *eve_data; |
208 | struct agent_cb *cb_pos; |
209 | struct agent_cb *cb_next; |
210 | struct hlist_node *tmp; |
211 | |
212 | is_need_to_unregister = false; |
213 | |
214 | /* Check for existing entry in hash table for given cb_type */ |
215 | hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) { |
216 | if (eve_data->cb_type == PM_INIT_SUSPEND_CB) { |
217 | /* Delete the list of callback */ |
218 | list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { |
219 | if (cb_pos->eve_cb == cb_fun) { |
220 | is_callback_found = true; |
221 | list_del_init(entry: &cb_pos->list); |
222 | kfree(objp: cb_pos); |
223 | } |
224 | } |
225 | /* remove an object from a hashtable */ |
226 | hash_del(node: &eve_data->hentry); |
227 | kfree(objp: eve_data); |
228 | is_need_to_unregister = true; |
229 | } |
230 | } |
231 | if (!is_callback_found) { |
232 | pr_warn("Didn't find any registered callback for suspend event\n" ); |
233 | return -EINVAL; |
234 | } |
235 | |
236 | return 0; |
237 | } |
238 | |
239 | static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event, |
240 | event_cb_func_t cb_fun, void *data) |
241 | { |
242 | bool is_callback_found = false; |
243 | struct registered_event_data *eve_data; |
244 | u64 key = ((u64)node_id << 32U) | (u64)event; |
245 | struct agent_cb *cb_pos; |
246 | struct agent_cb *cb_next; |
247 | struct hlist_node *tmp; |
248 | |
249 | is_need_to_unregister = false; |
250 | |
251 | /* Check for existing entry in hash table for given key id */ |
252 | hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) { |
253 | if (eve_data->key == key) { |
254 | /* Delete the list of callback */ |
255 | list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { |
256 | if (cb_pos->eve_cb == cb_fun && |
257 | cb_pos->agent_data == data) { |
258 | is_callback_found = true; |
259 | list_del_init(entry: &cb_pos->list); |
260 | kfree(objp: cb_pos); |
261 | } |
262 | } |
263 | |
264 | /* Remove HASH table if callback list is empty */ |
265 | if (list_empty(head: &eve_data->cb_list_head)) { |
266 | /* remove an object from a HASH table */ |
267 | hash_del(node: &eve_data->hentry); |
268 | kfree(objp: eve_data); |
269 | is_need_to_unregister = true; |
270 | } |
271 | } |
272 | } |
273 | if (!is_callback_found) { |
274 | pr_warn("Didn't find any registered callback for 0x%x 0x%x\n" , |
275 | node_id, event); |
276 | return -EINVAL; |
277 | } |
278 | |
279 | return 0; |
280 | } |
281 | |
282 | /** |
283 | * xlnx_register_event() - Register for the event. |
284 | * @cb_type: Type of callback from pm_api_cb_id, |
285 | * PM_NOTIFY_CB - for Error Events, |
286 | * PM_INIT_SUSPEND_CB - for suspend callback. |
287 | * @node_id: Node-Id related to event. |
288 | * @event: Event Mask for the Error Event. |
289 | * @wake: Flag specifying whether the subsystem should be woken upon |
290 | * event notification. |
291 | * @cb_fun: Function pointer to store the callback function. |
292 | * @data: Pointer for the driver instance. |
293 | * |
294 | * Return: Returns 0 on successful registration else error code. |
295 | */ |
296 | int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event, |
297 | const bool wake, event_cb_func_t cb_fun, void *data) |
298 | { |
299 | int ret = 0; |
300 | u32 eve; |
301 | int pos; |
302 | |
303 | if (event_manager_availability) |
304 | return event_manager_availability; |
305 | |
306 | if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) { |
307 | pr_err("%s() Unsupported Callback 0x%x\n" , __func__, cb_type); |
308 | return -EINVAL; |
309 | } |
310 | |
311 | if (!cb_fun) |
312 | return -EFAULT; |
313 | |
314 | if (cb_type == PM_INIT_SUSPEND_CB) { |
315 | ret = xlnx_add_cb_for_suspend(cb_fun, data); |
316 | } else { |
317 | if (!xlnx_is_error_event(node_id)) { |
318 | /* Add entry for Node-Id/Event in hash table */ |
319 | ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data); |
320 | } else { |
321 | /* Add into Hash table */ |
322 | for (pos = 0; pos < MAX_BITS; pos++) { |
323 | eve = event & (1 << pos); |
324 | if (!eve) |
325 | continue; |
326 | |
327 | /* Add entry for Node-Id/Eve in hash table */ |
328 | ret = xlnx_add_cb_for_notify_event(node_id, event: eve, wake, cb_fun, |
329 | data); |
330 | /* Break the loop if got error */ |
331 | if (ret) |
332 | break; |
333 | } |
334 | if (ret) { |
335 | /* Skip the Event for which got the error */ |
336 | pos--; |
337 | /* Remove registered(during this call) event from hash table */ |
338 | for ( ; pos >= 0; pos--) { |
339 | eve = event & (1 << pos); |
340 | if (!eve) |
341 | continue; |
342 | xlnx_remove_cb_for_notify_event(node_id, event: eve, cb_fun, data); |
343 | } |
344 | } |
345 | } |
346 | |
347 | if (ret) { |
348 | pr_err("%s() failed for 0x%x and 0x%x: %d\r\n" , __func__, node_id, |
349 | event, ret); |
350 | return ret; |
351 | } |
352 | |
353 | /* Register for Node-Id/Event combination in firmware */ |
354 | ret = zynqmp_pm_register_notifier(node: node_id, event, wake, enable: true); |
355 | if (ret) { |
356 | pr_err("%s() failed for 0x%x and 0x%x: %d\r\n" , __func__, node_id, |
357 | event, ret); |
358 | /* Remove already registered event from hash table */ |
359 | if (xlnx_is_error_event(node_id)) { |
360 | for (pos = 0; pos < MAX_BITS; pos++) { |
361 | eve = event & (1 << pos); |
362 | if (!eve) |
363 | continue; |
364 | xlnx_remove_cb_for_notify_event(node_id, event: eve, cb_fun, data); |
365 | } |
366 | } else { |
367 | xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data); |
368 | } |
369 | return ret; |
370 | } |
371 | } |
372 | |
373 | return ret; |
374 | } |
375 | EXPORT_SYMBOL_GPL(xlnx_register_event); |
376 | |
377 | /** |
378 | * xlnx_unregister_event() - Unregister for the event. |
379 | * @cb_type: Type of callback from pm_api_cb_id, |
380 | * PM_NOTIFY_CB - for Error Events, |
381 | * PM_INIT_SUSPEND_CB - for suspend callback. |
382 | * @node_id: Node-Id related to event. |
383 | * @event: Event Mask for the Error Event. |
384 | * @cb_fun: Function pointer of callback function. |
385 | * @data: Pointer of agent's private data. |
386 | * |
387 | * Return: Returns 0 on successful unregistration else error code. |
388 | */ |
389 | int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event, |
390 | event_cb_func_t cb_fun, void *data) |
391 | { |
392 | int ret = 0; |
393 | u32 eve, pos; |
394 | |
395 | is_need_to_unregister = false; |
396 | |
397 | if (event_manager_availability) |
398 | return event_manager_availability; |
399 | |
400 | if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) { |
401 | pr_err("%s() Unsupported Callback 0x%x\n" , __func__, cb_type); |
402 | return -EINVAL; |
403 | } |
404 | |
405 | if (!cb_fun) |
406 | return -EFAULT; |
407 | |
408 | if (cb_type == PM_INIT_SUSPEND_CB) { |
409 | ret = xlnx_remove_cb_for_suspend(cb_fun); |
410 | } else { |
411 | /* Remove Node-Id/Event from hash table */ |
412 | if (!xlnx_is_error_event(node_id)) { |
413 | xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data); |
414 | } else { |
415 | for (pos = 0; pos < MAX_BITS; pos++) { |
416 | eve = event & (1 << pos); |
417 | if (!eve) |
418 | continue; |
419 | |
420 | xlnx_remove_cb_for_notify_event(node_id, event: eve, cb_fun, data); |
421 | } |
422 | } |
423 | |
424 | /* Un-register if list is empty */ |
425 | if (is_need_to_unregister) { |
426 | /* Un-register for Node-Id/Event combination */ |
427 | ret = zynqmp_pm_register_notifier(node: node_id, event, wake: false, enable: false); |
428 | if (ret) { |
429 | pr_err("%s() failed for 0x%x and 0x%x: %d\n" , |
430 | __func__, node_id, event, ret); |
431 | return ret; |
432 | } |
433 | } |
434 | } |
435 | |
436 | return ret; |
437 | } |
438 | EXPORT_SYMBOL_GPL(xlnx_unregister_event); |
439 | |
440 | static void xlnx_call_suspend_cb_handler(const u32 *payload) |
441 | { |
442 | bool is_callback_found = false; |
443 | struct registered_event_data *eve_data; |
444 | u32 cb_type = payload[0]; |
445 | struct agent_cb *cb_pos; |
446 | struct agent_cb *cb_next; |
447 | |
448 | /* Check for existing entry in hash table for given cb_type */ |
449 | hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) { |
450 | if (eve_data->cb_type == cb_type) { |
451 | list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { |
452 | cb_pos->eve_cb(&payload[0], cb_pos->agent_data); |
453 | is_callback_found = true; |
454 | } |
455 | } |
456 | } |
457 | if (!is_callback_found) |
458 | pr_warn("Didn't find any registered callback for suspend event\n" ); |
459 | } |
460 | |
461 | static void xlnx_call_notify_cb_handler(const u32 *payload) |
462 | { |
463 | bool is_callback_found = false; |
464 | struct registered_event_data *eve_data; |
465 | u64 key = ((u64)payload[1] << 32U) | (u64)payload[2]; |
466 | int ret; |
467 | struct agent_cb *cb_pos; |
468 | struct agent_cb *cb_next; |
469 | |
470 | /* Check for existing entry in hash table for given key id */ |
471 | hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { |
472 | if (eve_data->key == key) { |
473 | list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { |
474 | cb_pos->eve_cb(&payload[0], cb_pos->agent_data); |
475 | is_callback_found = true; |
476 | } |
477 | |
478 | /* re register with firmware to get future events */ |
479 | ret = zynqmp_pm_register_notifier(node: payload[1], event: payload[2], |
480 | wake: eve_data->wake, enable: true); |
481 | if (ret) { |
482 | pr_err("%s() failed for 0x%x and 0x%x: %d\r\n" , __func__, |
483 | payload[1], payload[2], ret); |
484 | list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, |
485 | list) { |
486 | /* Remove already registered event from hash table */ |
487 | xlnx_remove_cb_for_notify_event(node_id: payload[1], event: payload[2], |
488 | cb_fun: cb_pos->eve_cb, |
489 | data: cb_pos->agent_data); |
490 | } |
491 | } |
492 | } |
493 | } |
494 | if (!is_callback_found) |
495 | pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n" , |
496 | payload[1], payload[2]); |
497 | } |
498 | |
/* Fetch the queued callback payload from firmware into @buf */
static void xlnx_get_event_callback_data(u32 *buf)
{
	zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
}
503 | |
504 | static irqreturn_t xlnx_event_handler(int irq, void *dev_id) |
505 | { |
506 | u32 cb_type, node_id, event, pos; |
507 | u32 payload[CB_MAX_PAYLOAD_SIZE] = {0}; |
508 | u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0}; |
509 | |
510 | /* Get event data */ |
511 | xlnx_get_event_callback_data(buf: payload); |
512 | |
513 | /* First element is callback type, others are callback arguments */ |
514 | cb_type = payload[0]; |
515 | |
516 | if (cb_type == PM_NOTIFY_CB) { |
517 | node_id = payload[1]; |
518 | event = payload[2]; |
519 | if (!xlnx_is_error_event(node_id)) { |
520 | xlnx_call_notify_cb_handler(payload); |
521 | } else { |
522 | /* |
523 | * Each call back function expecting payload as an input arguments. |
524 | * We can get multiple error events as in one call back through error |
525 | * mask. So payload[2] may can contain multiple error events. |
526 | * In reg_driver_map database we store data in the combination of single |
527 | * node_id-error combination. |
528 | * So coping the payload message into event_data and update the |
529 | * event_data[2] with Error Mask for single error event and use |
530 | * event_data as input argument for registered call back function. |
531 | * |
532 | */ |
533 | memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE)); |
534 | /* Support Multiple Error Event */ |
535 | for (pos = 0; pos < MAX_BITS; pos++) { |
536 | if ((0 == (event & (1 << pos)))) |
537 | continue; |
538 | event_data[2] = (event & (1 << pos)); |
539 | xlnx_call_notify_cb_handler(payload: event_data); |
540 | } |
541 | } |
542 | } else if (cb_type == PM_INIT_SUSPEND_CB) { |
543 | xlnx_call_suspend_cb_handler(payload); |
544 | } else { |
545 | pr_err("%s() Unsupported Callback %d\n" , __func__, cb_type); |
546 | } |
547 | |
548 | return IRQ_HANDLED; |
549 | } |
550 | |
/* CPU hotplug "online" step: unmask the event SGI on the incoming CPU */
static int xlnx_event_cpuhp_start(unsigned int cpu)
{
	enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);

	return 0;
}
557 | |
/* CPU hotplug "offline" step: mask the event SGI on the outgoing CPU */
static int xlnx_event_cpuhp_down(unsigned int cpu)
{
	disable_percpu_irq(virq_sgi);

	return 0;
}
564 | |
/* on_each_cpu() helper: mask the event SGI on the local CPU */
static void xlnx_disable_percpu_irq(void *data)
{
	disable_percpu_irq(virq_sgi);
}
569 | |
570 | static int xlnx_event_init_sgi(struct platform_device *pdev) |
571 | { |
572 | int ret = 0; |
573 | int cpu; |
574 | /* |
575 | * IRQ related structures are used for the following: |
576 | * for each SGI interrupt ensure its mapped by GIC IRQ domain |
577 | * and that each corresponding linux IRQ for the HW IRQ has |
578 | * a handler for when receiving an interrupt from the remote |
579 | * processor. |
580 | */ |
581 | struct irq_domain *domain; |
582 | struct irq_fwspec sgi_fwspec; |
583 | struct device_node *interrupt_parent = NULL; |
584 | struct device *parent = pdev->dev.parent; |
585 | |
586 | /* Find GIC controller to map SGIs. */ |
587 | interrupt_parent = of_irq_find_parent(child: parent->of_node); |
588 | if (!interrupt_parent) { |
589 | dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n" ); |
590 | return -EINVAL; |
591 | } |
592 | |
593 | /* Each SGI needs to be associated with GIC's IRQ domain. */ |
594 | domain = irq_find_host(node: interrupt_parent); |
595 | of_node_put(node: interrupt_parent); |
596 | |
597 | /* Each mapping needs GIC domain when finding IRQ mapping. */ |
598 | sgi_fwspec.fwnode = domain->fwnode; |
599 | |
600 | /* |
601 | * When irq domain looks at mapping each arg is as follows: |
602 | * 3 args for: interrupt type (SGI), interrupt # (set later), type |
603 | */ |
604 | sgi_fwspec.param_count = 1; |
605 | |
606 | /* Set SGI's hwirq */ |
607 | sgi_fwspec.param[0] = sgi_num; |
608 | virq_sgi = irq_create_fwspec_mapping(fwspec: &sgi_fwspec); |
609 | |
610 | cpu = get_cpu(); |
611 | per_cpu(cpu_number1, cpu) = cpu; |
612 | ret = request_percpu_irq(irq: virq_sgi, handler: xlnx_event_handler, devname: "xlnx_event_mgmt" , |
613 | percpu_dev_id: &cpu_number1); |
614 | put_cpu(); |
615 | |
616 | WARN_ON(ret); |
617 | if (ret) { |
618 | irq_dispose_mapping(virq: virq_sgi); |
619 | return ret; |
620 | } |
621 | |
622 | irq_to_desc(irq: virq_sgi); |
623 | irq_set_status_flags(irq: virq_sgi, set: IRQ_PER_CPU); |
624 | |
625 | return ret; |
626 | } |
627 | |
/*
 * xlnx_event_cleanup_sgi() - Tear down the SGI: remove the hotplug state,
 * mask the IRQ on every CPU, then free and unmap it.
 *
 * NOTE(review): smp_processor_id() is called without disabling preemption
 * here — confirm this path only runs from a non-preemptible/remove context.
 */
static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_number1, cpu) = cpu;

	cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);

	/* Mask the SGI everywhere before unmapping it */
	on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);

	irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
	free_percpu_irq(virq_sgi, &cpu_number1);
	irq_dispose_mapping(virq_sgi);
}
642 | |
643 | static int xlnx_event_manager_probe(struct platform_device *pdev) |
644 | { |
645 | int ret; |
646 | |
647 | ret = zynqmp_pm_feature(api_id: PM_REGISTER_NOTIFIER); |
648 | if (ret < 0) { |
649 | dev_err(&pdev->dev, "Feature check failed with %d\n" , ret); |
650 | return ret; |
651 | } |
652 | |
653 | if ((ret & FIRMWARE_VERSION_MASK) < |
654 | REGISTER_NOTIFIER_FIRMWARE_VERSION) { |
655 | dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n" , |
656 | REGISTER_NOTIFIER_FIRMWARE_VERSION, |
657 | ret & FIRMWARE_VERSION_MASK); |
658 | return -EOPNOTSUPP; |
659 | } |
660 | |
661 | /* Initialize the SGI */ |
662 | ret = xlnx_event_init_sgi(pdev); |
663 | if (ret) { |
664 | dev_err(&pdev->dev, "SGI Init has been failed with %d\n" , ret); |
665 | return ret; |
666 | } |
667 | |
668 | /* Setup function for the CPU hot-plug cases */ |
669 | cpuhp_setup_state(state: CPUHP_AP_ONLINE_DYN, name: "soc/event:starting" , |
670 | startup: xlnx_event_cpuhp_start, teardown: xlnx_event_cpuhp_down); |
671 | |
672 | ret = zynqmp_pm_register_sgi(sgi_num, reset: 0); |
673 | if (ret) { |
674 | if (ret == -EOPNOTSUPP) |
675 | dev_err(&pdev->dev, "SGI registration not supported by TF-A or Xen\n" ); |
676 | else |
677 | dev_err(&pdev->dev, "SGI %d registration failed, err %d\n" , sgi_num, ret); |
678 | |
679 | xlnx_event_cleanup_sgi(pdev); |
680 | return ret; |
681 | } |
682 | |
683 | event_manager_availability = 0; |
684 | |
685 | dev_info(&pdev->dev, "SGI %d Registered over TF-A\n" , sgi_num); |
686 | dev_info(&pdev->dev, "Xilinx Event Management driver probed\n" ); |
687 | |
688 | return ret; |
689 | } |
690 | |
691 | static void xlnx_event_manager_remove(struct platform_device *pdev) |
692 | { |
693 | int i; |
694 | struct registered_event_data *eve_data; |
695 | struct hlist_node *tmp; |
696 | int ret; |
697 | struct agent_cb *cb_pos; |
698 | struct agent_cb *cb_next; |
699 | |
700 | hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) { |
701 | list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { |
702 | list_del_init(entry: &cb_pos->list); |
703 | kfree(objp: cb_pos); |
704 | } |
705 | hash_del(node: &eve_data->hentry); |
706 | kfree(objp: eve_data); |
707 | } |
708 | |
709 | ret = zynqmp_pm_register_sgi(sgi_num: 0, reset: 1); |
710 | if (ret) |
711 | dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n" , ret); |
712 | |
713 | xlnx_event_cleanup_sgi(pdev); |
714 | |
715 | event_manager_availability = -EACCES; |
716 | } |
717 | |
/* Platform driver glue for the "xlnx_event_manager" platform device */
static struct platform_driver xlnx_event_manager_driver = {
	.probe = xlnx_event_manager_probe,
	.remove_new = xlnx_event_manager_remove,
	.driver = {
		.name = "xlnx_event_manager",
	},
};
/*
 * Allow overriding the SGI number at load time.
 * NOTE(review): sgi_num is declared as int but registered as uint —
 * confirm the declaration and module_param type should match.
 */
module_param(sgi_num, uint, 0);
module_platform_driver(xlnx_event_manager_driver);
727 | |