1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. |
5 | * |
6 | * (C) Copyright 2020 Hewlett Packard Enterprise Development LP |
7 | * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved. |
8 | */ |
9 | |
10 | /* |
11 | * Cross Partition Communication (XPC) support - standard version. |
12 | * |
13 | * XPC provides a message passing capability that crosses partition |
14 | * boundaries. This module is made up of two parts: |
15 | * |
16 | * partition This part detects the presence/absence of other |
17 | * partitions. It provides a heartbeat and monitors |
18 | * the heartbeats of other partitions. |
19 | * |
20 | * channel This part manages the channels and sends/receives |
21 | * messages across them to/from other partitions. |
22 | * |
23 | * There are a couple of additional functions residing in XP, which |
24 | * provide an interface to XPC for its users. |
25 | * |
26 | * |
27 | * Caveats: |
28 | * |
29 | * . Currently on sn2, we have no way to determine which nasid an IRQ |
30 | * came from. Thus, xpc_send_IRQ_sn2() does a remote amo write |
31 | * followed by an IPI. The amo indicates where data is to be pulled |
32 | * from, so after the IPI arrives, the remote partition checks the amo |
33 | * word. The IPI can actually arrive before the amo however, so other |
34 | * code must periodically check for this case. Also, remote amo |
35 | * operations do not reliably time out. Thus we do a remote PIO read |
36 | * solely to know whether the remote partition is down and whether we |
37 | * should stop sending IPIs to it. This remote PIO read operation is |
38 | * set up in a special nofault region so SAL knows to ignore (and |
39 | * cleanup) any errors due to the remote amo write, PIO read, and/or |
40 | * PIO write operations. |
41 | * |
42 | * If/when new hardware solves this IPI problem, we should abandon |
43 | * the current approach. |
44 | * |
45 | */ |
46 | |
47 | #include <linux/module.h> |
48 | #include <linux/slab.h> |
49 | #include <linux/sysctl.h> |
50 | #include <linux/device.h> |
51 | #include <linux/delay.h> |
52 | #include <linux/reboot.h> |
53 | #include <linux/kdebug.h> |
54 | #include <linux/kthread.h> |
55 | #include "xpc.h" |
56 | |
57 | #ifdef CONFIG_X86_64 |
58 | #include <asm/traps.h> |
59 | #endif |
60 | |
61 | /* define two XPC debug device structures to be used with dev_dbg() et al */ |
62 | |
63 | static struct device_driver xpc_dbg_name = { |
64 | .name = "xpc" |
65 | }; |
66 | |
67 | static struct device xpc_part_dbg_subname = { |
68 | .init_name = "" , /* set to "part" at xpc_init() time */ |
69 | .driver = &xpc_dbg_name |
70 | }; |
71 | |
72 | static struct device xpc_chan_dbg_subname = { |
73 | .init_name = "" , /* set to "chan" at xpc_init() time */ |
74 | .driver = &xpc_dbg_name |
75 | }; |
76 | |
77 | struct device *xpc_part = &xpc_part_dbg_subname; |
78 | struct device *xpc_chan = &xpc_chan_dbg_subname; |
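
/*
 * With the names assigned in xpc_init(), output issued through these
 * devices is prefixed with the driver and device names, e.g.
 * dev_dbg(xpc_part, ...) logs as "xpc part: ..." and dev_err(xpc_chan, ...)
 * as "xpc chan: ...".
 */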
79 | |
80 | static int xpc_kdebug_ignore; |
81 | |
82 | /* systune related variables for /proc/sys directories */ |
83 | |
84 | static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; |
85 | static int xpc_hb_min_interval = 1; |
86 | static int xpc_hb_max_interval = 10; |
87 | |
88 | static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL; |
89 | static int xpc_hb_check_min_interval = 10; |
90 | static int xpc_hb_check_max_interval = 120; |
91 | |
92 | int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT; |
93 | static int xpc_disengage_min_timelimit; /* = 0 */ |
94 | static int xpc_disengage_max_timelimit = 120; |
95 | |
96 | static struct ctl_table xpc_sys_xpc_hb[] = { |
97 | { |
98 | .procname = "hb_interval" , |
99 | .data = &xpc_hb_interval, |
100 | .maxlen = sizeof(int), |
101 | .mode = 0644, |
102 | .proc_handler = proc_dointvec_minmax, |
103 | .extra1 = &xpc_hb_min_interval, |
104 | .extra2 = &xpc_hb_max_interval}, |
105 | { |
106 | .procname = "hb_check_interval" , |
107 | .data = &xpc_hb_check_interval, |
108 | .maxlen = sizeof(int), |
109 | .mode = 0644, |
110 | .proc_handler = proc_dointvec_minmax, |
111 | .extra1 = &xpc_hb_check_min_interval, |
112 | .extra2 = &xpc_hb_check_max_interval}, |
113 | }; |
114 | static struct ctl_table xpc_sys_xpc[] = { |
115 | { |
116 | .procname = "disengage_timelimit" , |
117 | .data = &xpc_disengage_timelimit, |
118 | .maxlen = sizeof(int), |
119 | .mode = 0644, |
120 | .proc_handler = proc_dointvec_minmax, |
121 | .extra1 = &xpc_disengage_min_timelimit, |
122 | .extra2 = &xpc_disengage_max_timelimit}, |
123 | }; |
124 | |
125 | static struct ctl_table_header *xpc_sysctl; |
126 | static struct ctl_table_header *xpc_sysctl_hb; |
127 | |
128 | /* non-zero if any remote partition disengage was timed out */ |
129 | int xpc_disengage_timedout; |
130 | |
131 | /* #of activate IRQs received and not yet processed */ |
132 | int xpc_activate_IRQ_rcvd; |
133 | DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock); |
134 | |
135 | /* IRQ handler notifies this wait queue on receipt of an IRQ */ |
136 | DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq); |
137 | |
138 | static unsigned long xpc_hb_check_timeout; |
139 | static struct timer_list xpc_hb_timer; |
140 | |
141 | /* notification that the xpc_hb_checker thread has exited */ |
142 | static DECLARE_COMPLETION(xpc_hb_checker_exited); |
143 | |
144 | /* notification that the xpc_discovery thread has exited */ |
145 | static DECLARE_COMPLETION(xpc_discovery_exited); |
146 | |
147 | static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); |
148 | |
149 | static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); |
150 | static struct notifier_block xpc_reboot_notifier = { |
151 | .notifier_call = xpc_system_reboot, |
152 | }; |
153 | |
154 | static int xpc_system_die(struct notifier_block *, unsigned long, void *); |
155 | static struct notifier_block xpc_die_notifier = { |
156 | .notifier_call = xpc_system_die, |
157 | }; |
158 | |
159 | struct xpc_arch_operations xpc_arch_ops; |
160 | |
161 | /* |
162 | * Timer function to enforce the timelimit on the partition disengage. |
163 | */ |
164 | static void |
165 | xpc_timeout_partition_disengage(struct timer_list *t) |
166 | { |
167 | struct xpc_partition *part = from_timer(part, t, disengage_timer); |
168 | |
169 | DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); |
170 | |
171 | xpc_partition_disengaged_from_timer(part); |
172 | |
173 | DBUG_ON(part->disengage_timeout != 0); |
174 | DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part))); |
175 | } |
176 | |
177 | /* |
 * Timer to produce the heartbeat. The timer structure's function is
 * already set when this is initially called. A tunable is used to
180 | * specify when the next timeout should occur. |
181 | */ |
182 | static void |
183 | xpc_hb_beater(struct timer_list *unused) |
184 | { |
185 | xpc_arch_ops.increment_heartbeat(); |
186 | |
187 | if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) |
188 | wake_up_interruptible(&xpc_activate_IRQ_wq); |
189 | |
190 | xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); |
	add_timer(&xpc_hb_timer);
192 | } |
193 | |
194 | static void |
195 | xpc_start_hb_beater(void) |
196 | { |
197 | xpc_arch_ops.heartbeat_init(); |
198 | timer_setup(&xpc_hb_timer, xpc_hb_beater, 0); |
199 | xpc_hb_beater(NULL); |
200 | } |
201 | |
202 | static void |
203 | xpc_stop_hb_beater(void) |
204 | { |
	del_timer_sync(&xpc_hb_timer);
206 | xpc_arch_ops.heartbeat_exit(); |
207 | } |
208 | |
209 | /* |
210 | * At periodic intervals, scan through all active partitions and ensure |
211 | * their heartbeat is still active. If not, the partition is deactivated. |
212 | */ |
213 | static void |
214 | xpc_check_remote_hb(void) |
215 | { |
216 | struct xpc_partition *part; |
217 | short partid; |
218 | enum xp_retval ret; |
219 | |
220 | for (partid = 0; partid < xp_max_npartitions; partid++) { |
221 | |
222 | if (xpc_exiting) |
223 | break; |
224 | |
225 | if (partid == xp_partition_id) |
226 | continue; |
227 | |
228 | part = &xpc_partitions[partid]; |
229 | |
230 | if (part->act_state == XPC_P_AS_INACTIVE || |
231 | part->act_state == XPC_P_AS_DEACTIVATING) { |
232 | continue; |
233 | } |
234 | |
235 | ret = xpc_arch_ops.get_remote_heartbeat(part); |
236 | if (ret != xpSuccess) |
237 | XPC_DEACTIVATE_PARTITION(part, ret); |
238 | } |
239 | } |
240 | |
241 | /* |
242 | * This thread is responsible for nearly all of the partition |
243 | * activation/deactivation. |
244 | */ |
245 | static int |
246 | xpc_hb_checker(void *ignore) |
247 | { |
248 | int force_IRQ = 0; |
249 | |
250 | /* this thread was marked active by xpc_hb_init() */ |
251 | |
252 | set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU)); |
253 | |
254 | /* set our heartbeating to other partitions into motion */ |
255 | xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); |
256 | xpc_start_hb_beater(); |
257 | |
258 | while (!xpc_exiting) { |
259 | |
260 | dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " |
261 | "been received\n" , |
262 | (int)(xpc_hb_check_timeout - jiffies), |
263 | xpc_activate_IRQ_rcvd); |
264 | |
265 | /* checking of remote heartbeats is skewed by IRQ handling */ |
266 | if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) { |
267 | xpc_hb_check_timeout = jiffies + |
268 | (xpc_hb_check_interval * HZ); |
269 | |
270 | dev_dbg(xpc_part, "checking remote heartbeats\n" ); |
271 | xpc_check_remote_hb(); |
272 | } |
273 | |
274 | /* check for outstanding IRQs */ |
275 | if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) { |
276 | force_IRQ = 0; |
277 | dev_dbg(xpc_part, "processing activate IRQs " |
278 | "received\n" ); |
279 | xpc_arch_ops.process_activate_IRQ_rcvd(); |
280 | } |
281 | |
282 | /* wait for IRQ or timeout */ |
283 | (void)wait_event_interruptible(xpc_activate_IRQ_wq, |
284 | (time_is_before_eq_jiffies( |
285 | xpc_hb_check_timeout) || |
286 | xpc_activate_IRQ_rcvd > 0 || |
287 | xpc_exiting)); |
288 | } |
289 | |
290 | xpc_stop_hb_beater(); |
291 | |
292 | dev_dbg(xpc_part, "heartbeat checker is exiting\n" ); |
293 | |
294 | /* mark this thread as having exited */ |
295 | complete(&xpc_hb_checker_exited); |
296 | return 0; |
297 | } |
298 | |
299 | /* |
300 | * This thread will attempt to discover other partitions to activate |
301 | * based on info provided by SAL. This new thread is short lived and |
302 | * will exit once discovery is complete. |
303 | */ |
304 | static int |
305 | xpc_initiate_discovery(void *ignore) |
306 | { |
307 | xpc_discovery(); |
308 | |
309 | dev_dbg(xpc_part, "discovery thread is exiting\n" ); |
310 | |
311 | /* mark this thread as having exited */ |
312 | complete(&xpc_discovery_exited); |
313 | return 0; |
314 | } |
315 | |
316 | /* |
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB to call xpc_activating(). XPC hangs on to
319 | * that kthread until the partition is brought down, at which time that kthread |
320 | * returns back to XPC HB. (The return of that kthread will signify to XPC HB |
321 | * that XPC has dismantled all communication infrastructure for the associated |
322 | * partition.) This kthread becomes the channel manager for that partition. |
323 | * |
324 | * Each active partition has a channel manager, who, besides connecting and |
325 | * disconnecting channels, will ensure that each of the partition's connected |
326 | * channels has the required number of assigned kthreads to get the work done. |
327 | */ |
328 | static void |
329 | xpc_channel_mgr(struct xpc_partition *part) |
330 | { |
331 | while (part->act_state != XPC_P_AS_DEACTIVATING || |
332 | atomic_read(v: &part->nchannels_active) > 0 || |
333 | !xpc_partition_disengaged(part)) { |
334 | |
335 | xpc_process_sent_chctl_flags(part); |
336 | |
337 | /* |
338 | * Wait until we've been requested to activate kthreads or |
339 | * all of the channel's message queues have been torn down or |
340 | * a signal is pending. |
341 | * |
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since it will service
		 * all the requests in one pass. The reason it's set to 1
		 * instead of 0 is so that other kthreads will know that the
		 * channel mgr is running and won't bother trying to wake it
		 * up.
349 | */ |
350 | atomic_dec(v: &part->channel_mgr_requests); |
351 | (void)wait_event_interruptible(part->channel_mgr_wq, |
352 | (atomic_read(&part->channel_mgr_requests) > 0 || |
353 | part->chctl.all_flags != 0 || |
354 | (part->act_state == XPC_P_AS_DEACTIVATING && |
355 | atomic_read(&part->nchannels_active) == 0 && |
356 | xpc_partition_disengaged(part)))); |
357 | atomic_set(v: &part->channel_mgr_requests, i: 1); |
358 | } |
359 | } |
360 | |
361 | /* |
362 | * Guarantee that the kzalloc'd memory is cacheline aligned. |
363 | */ |
364 | void * |
365 | xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) |
366 | { |
	/* see if kzalloc will give us cacheline aligned memory by default */
368 | *base = kzalloc(size, flags); |
369 | if (*base == NULL) |
370 | return NULL; |
371 | |
372 | if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) |
373 | return *base; |
374 | |
	kfree(*base);
376 | |
377 | /* nope, we'll have to do it ourselves */ |
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
379 | if (*base == NULL) |
380 | return NULL; |
381 | |
382 | return (void *)L1_CACHE_ALIGN((u64)*base); |
383 | } |
384 | |
385 | /* |
386 | * Setup the channel structures necessary to support XPartition Communication |
387 | * between the specified remote partition and the local one. |
388 | */ |
389 | static enum xp_retval |
390 | xpc_setup_ch_structures(struct xpc_partition *part) |
391 | { |
392 | enum xp_retval ret; |
393 | int ch_number; |
394 | struct xpc_channel *ch; |
395 | short partid = XPC_PARTID(part); |
396 | |
397 | /* |
398 | * Allocate all of the channel structures as a contiguous chunk of |
399 | * memory. |
400 | */ |
401 | DBUG_ON(part->channels != NULL); |
402 | part->channels = kcalloc(XPC_MAX_NCHANNELS, |
				 sizeof(struct xpc_channel),
404 | GFP_KERNEL); |
405 | if (part->channels == NULL) { |
406 | dev_err(xpc_chan, "can't get memory for channels\n" ); |
407 | return xpNoMemory; |
408 | } |
409 | |
410 | /* allocate the remote open and close args */ |
411 | |
412 | part->remote_openclose_args = |
413 | xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, |
414 | GFP_KERNEL, base: &part-> |
415 | remote_openclose_args_base); |
416 | if (part->remote_openclose_args == NULL) { |
417 | dev_err(xpc_chan, "can't get memory for remote connect args\n" ); |
418 | ret = xpNoMemory; |
419 | goto out_1; |
420 | } |
421 | |
422 | part->chctl.all_flags = 0; |
423 | spin_lock_init(&part->chctl_lock); |
424 | |
425 | atomic_set(v: &part->channel_mgr_requests, i: 1); |
426 | init_waitqueue_head(&part->channel_mgr_wq); |
427 | |
428 | part->nchannels = XPC_MAX_NCHANNELS; |
429 | |
430 | atomic_set(v: &part->nchannels_active, i: 0); |
431 | atomic_set(v: &part->nchannels_engaged, i: 0); |
432 | |
433 | for (ch_number = 0; ch_number < part->nchannels; ch_number++) { |
434 | ch = &part->channels[ch_number]; |
435 | |
436 | ch->partid = partid; |
437 | ch->number = ch_number; |
438 | ch->flags = XPC_C_DISCONNECTED; |
439 | |
440 | atomic_set(v: &ch->kthreads_assigned, i: 0); |
441 | atomic_set(v: &ch->kthreads_idle, i: 0); |
442 | atomic_set(v: &ch->kthreads_active, i: 0); |
443 | |
444 | atomic_set(v: &ch->references, i: 0); |
445 | atomic_set(v: &ch->n_to_notify, i: 0); |
446 | |
447 | spin_lock_init(&ch->lock); |
		init_completion(&ch->wdisconnect_wait);
449 | |
450 | atomic_set(v: &ch->n_on_msg_allocate_wq, i: 0); |
451 | init_waitqueue_head(&ch->msg_allocate_wq); |
452 | init_waitqueue_head(&ch->idle_wq); |
453 | } |
454 | |
455 | ret = xpc_arch_ops.setup_ch_structures(part); |
456 | if (ret != xpSuccess) |
457 | goto out_2; |
458 | |
459 | /* |
460 | * With the setting of the partition setup_state to XPC_P_SS_SETUP, |
461 | * we're declaring that this partition is ready to go. |
462 | */ |
463 | part->setup_state = XPC_P_SS_SETUP; |
464 | |
465 | return xpSuccess; |
466 | |
467 | /* setup of ch structures failed */ |
468 | out_2: |
	kfree(part->remote_openclose_args_base);
470 | part->remote_openclose_args = NULL; |
471 | out_1: |
	kfree(part->channels);
473 | part->channels = NULL; |
474 | return ret; |
475 | } |
476 | |
477 | /* |
 * Tear down the channel structures necessary to support XPartition
 * Communication between the specified remote partition and the local one.
479 | * between the specified remote partition and the local one. |
480 | */ |
481 | static void |
482 | xpc_teardown_ch_structures(struct xpc_partition *part) |
483 | { |
484 | DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); |
485 | DBUG_ON(atomic_read(&part->nchannels_active) != 0); |
486 | |
487 | /* |
488 | * Make this partition inaccessible to local processes by marking it |
489 | * as no longer setup. Then wait before proceeding with the teardown |
490 | * until all existing references cease. |
491 | */ |
492 | DBUG_ON(part->setup_state != XPC_P_SS_SETUP); |
493 | part->setup_state = XPC_P_SS_WTEARDOWN; |
494 | |
495 | wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); |
496 | |
497 | /* now we can begin tearing down the infrastructure */ |
498 | |
499 | xpc_arch_ops.teardown_ch_structures(part); |
500 | |
	kfree(part->remote_openclose_args_base);
502 | part->remote_openclose_args = NULL; |
	kfree(part->channels);
504 | part->channels = NULL; |
505 | |
506 | part->setup_state = XPC_P_SS_TORNDOWN; |
507 | } |
508 | |
509 | /* |
510 | * When XPC HB determines that a partition has come up, it will create a new |
511 | * kthread and that kthread will call this function to attempt to set up the |
512 | * basic infrastructure used for Cross Partition Communication with the newly |
513 | * upped partition. |
514 | * |
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition, becoming the channel
 * manager for that partition, until the partition begins deactivating, at
 * which time the kthread will tear down the XPC infrastructure and then exit.
519 | */ |
520 | static int |
521 | xpc_activating(void *__partid) |
522 | { |
523 | short partid = (u64)__partid; |
524 | struct xpc_partition *part = &xpc_partitions[partid]; |
525 | unsigned long irq_flags; |
526 | |
527 | DBUG_ON(partid < 0 || partid >= xp_max_npartitions); |
528 | |
529 | spin_lock_irqsave(&part->act_lock, irq_flags); |
530 | |
531 | if (part->act_state == XPC_P_AS_DEACTIVATING) { |
532 | part->act_state = XPC_P_AS_INACTIVE; |
533 | spin_unlock_irqrestore(lock: &part->act_lock, flags: irq_flags); |
534 | part->remote_rp_pa = 0; |
535 | return 0; |
536 | } |
537 | |
538 | /* indicate the thread is activating */ |
539 | DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ); |
540 | part->act_state = XPC_P_AS_ACTIVATING; |
541 | |
542 | XPC_SET_REASON(part, 0, 0); |
543 | spin_unlock_irqrestore(lock: &part->act_lock, flags: irq_flags); |
544 | |
545 | dev_dbg(xpc_part, "activating partition %d\n" , partid); |
546 | |
547 | xpc_arch_ops.allow_hb(partid); |
548 | |
549 | if (xpc_setup_ch_structures(part) == xpSuccess) { |
550 | (void)xpc_part_ref(part); /* this will always succeed */ |
551 | |
552 | if (xpc_arch_ops.make_first_contact(part) == xpSuccess) { |
553 | xpc_mark_partition_active(part); |
554 | xpc_channel_mgr(part); |
555 | /* won't return until partition is deactivating */ |
556 | } |
557 | |
558 | xpc_part_deref(part); |
559 | xpc_teardown_ch_structures(part); |
560 | } |
561 | |
562 | xpc_arch_ops.disallow_hb(partid); |
563 | xpc_mark_partition_inactive(part); |
564 | |
565 | if (part->reason == xpReactivating) { |
566 | /* interrupting ourselves results in activating partition */ |
567 | xpc_arch_ops.request_partition_reactivation(part); |
568 | } |
569 | |
570 | return 0; |
571 | } |
572 | |
573 | void |
574 | xpc_activate_partition(struct xpc_partition *part) |
575 | { |
576 | short partid = XPC_PARTID(part); |
577 | unsigned long irq_flags; |
578 | struct task_struct *kthread; |
579 | |
580 | spin_lock_irqsave(&part->act_lock, irq_flags); |
581 | |
582 | DBUG_ON(part->act_state != XPC_P_AS_INACTIVE); |
583 | |
584 | part->act_state = XPC_P_AS_ACTIVATION_REQ; |
585 | XPC_SET_REASON(part, xpCloneKThread, __LINE__); |
586 | |
587 | spin_unlock_irqrestore(lock: &part->act_lock, flags: irq_flags); |
588 | |
589 | kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d" , |
590 | partid); |
591 | if (IS_ERR(ptr: kthread)) { |
592 | spin_lock_irqsave(&part->act_lock, irq_flags); |
593 | part->act_state = XPC_P_AS_INACTIVE; |
594 | XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__); |
595 | spin_unlock_irqrestore(lock: &part->act_lock, flags: irq_flags); |
596 | } |
597 | } |
598 | |
599 | void |
600 | xpc_activate_kthreads(struct xpc_channel *ch, int needed) |
601 | { |
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
604 | int wakeup; |
605 | |
606 | DBUG_ON(needed <= 0); |
607 | |
608 | if (idle > 0) { |
609 | wakeup = (needed > idle) ? idle : needed; |
610 | needed -= wakeup; |
611 | |
612 | dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, " |
613 | "channel=%d\n" , wakeup, ch->partid, ch->number); |
614 | |
615 | /* only wakeup the requested number of kthreads */ |
616 | wake_up_nr(&ch->idle_wq, wakeup); |
617 | } |
618 | |
619 | if (needed <= 0) |
620 | return; |
621 | |
622 | if (needed + assigned > ch->kthreads_assigned_limit) { |
623 | needed = ch->kthreads_assigned_limit - assigned; |
624 | if (needed <= 0) |
625 | return; |
626 | } |
627 | |
628 | dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n" , |
629 | needed, ch->partid, ch->number); |
630 | |
631 | xpc_create_kthreads(ch, needed, 0); |
632 | } |
633 | |
634 | /* |
635 | * This function is where XPC's kthreads wait for messages to deliver. |
636 | */ |
637 | static void |
638 | xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) |
639 | { |
640 | int (*n_of_deliverable_payloads) (struct xpc_channel *) = |
641 | xpc_arch_ops.n_of_deliverable_payloads; |
642 | |
643 | do { |
644 | /* deliver messages to their intended recipients */ |
645 | |
646 | while (n_of_deliverable_payloads(ch) > 0 && |
647 | !(ch->flags & XPC_C_DISCONNECTING)) { |
648 | xpc_deliver_payload(ch); |
649 | } |
650 | |
		if (atomic_inc_return(&ch->kthreads_idle) >
652 | ch->kthreads_idle_limit) { |
653 | /* too many idle kthreads on this channel */ |
654 | atomic_dec(v: &ch->kthreads_idle); |
655 | break; |
656 | } |
657 | |
658 | dev_dbg(xpc_chan, "idle kthread calling " |
659 | "wait_event_interruptible_exclusive()\n" ); |
660 | |
661 | (void)wait_event_interruptible_exclusive(ch->idle_wq, |
662 | (n_of_deliverable_payloads(ch) > 0 || |
663 | (ch->flags & XPC_C_DISCONNECTING))); |
664 | |
665 | atomic_dec(v: &ch->kthreads_idle); |
666 | |
667 | } while (!(ch->flags & XPC_C_DISCONNECTING)); |
668 | } |
669 | |
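/*
 * The 'args' cookie passed to xpc_kthread_start() is the u64 built by
 * XPC_PACK_ARGS() in xpc_create_kthreads(). Assuming the xpc.h macros,
 * it simply packs the partid into the low 32 bits and the channel number
 * into the high 32 bits; XPC_UNPACK_ARG1()/XPC_UNPACK_ARG2() below
 * extract them again.
 */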
670 | static int |
671 | xpc_kthread_start(void *args) |
672 | { |
673 | short partid = XPC_UNPACK_ARG1(args); |
674 | u16 ch_number = XPC_UNPACK_ARG2(args); |
675 | struct xpc_partition *part = &xpc_partitions[partid]; |
676 | struct xpc_channel *ch; |
677 | int n_needed; |
678 | unsigned long irq_flags; |
679 | int (*n_of_deliverable_payloads) (struct xpc_channel *) = |
680 | xpc_arch_ops.n_of_deliverable_payloads; |
681 | |
682 | dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n" , |
683 | partid, ch_number); |
684 | |
685 | ch = &part->channels[ch_number]; |
686 | |
687 | if (!(ch->flags & XPC_C_DISCONNECTING)) { |
688 | |
689 | /* let registerer know that connection has been established */ |
690 | |
691 | spin_lock_irqsave(&ch->lock, irq_flags); |
692 | if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) { |
693 | ch->flags |= XPC_C_CONNECTEDCALLOUT; |
694 | spin_unlock_irqrestore(lock: &ch->lock, flags: irq_flags); |
695 | |
696 | xpc_connected_callout(ch); |
697 | |
698 | spin_lock_irqsave(&ch->lock, irq_flags); |
699 | ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE; |
700 | spin_unlock_irqrestore(lock: &ch->lock, flags: irq_flags); |
701 | |
702 | /* |
703 | * It is possible that while the callout was being |
704 | * made that the remote partition sent some messages. |
705 | * If that is the case, we may need to activate |
706 | * additional kthreads to help deliver them. We only |
707 | * need one less than total #of messages to deliver. |
708 | */ |
709 | n_needed = n_of_deliverable_payloads(ch) - 1; |
710 | if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) |
				xpc_activate_kthreads(ch, n_needed);
712 | |
713 | } else { |
714 | spin_unlock_irqrestore(lock: &ch->lock, flags: irq_flags); |
715 | } |
716 | |
717 | xpc_kthread_waitmsgs(part, ch); |
718 | } |
719 | |
720 | /* let registerer know that connection is disconnecting */ |
721 | |
722 | spin_lock_irqsave(&ch->lock, irq_flags); |
723 | if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && |
724 | !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { |
725 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT; |
726 | spin_unlock_irqrestore(lock: &ch->lock, flags: irq_flags); |
727 | |
728 | xpc_disconnect_callout(ch, xpDisconnecting); |
729 | |
730 | spin_lock_irqsave(&ch->lock, irq_flags); |
731 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; |
732 | } |
733 | spin_unlock_irqrestore(lock: &ch->lock, flags: irq_flags); |
734 | |
735 | if (atomic_dec_return(v: &ch->kthreads_assigned) == 0 && |
736 | atomic_dec_return(v: &part->nchannels_engaged) == 0) { |
737 | xpc_arch_ops.indicate_partition_disengaged(part); |
738 | } |
739 | |
740 | xpc_msgqueue_deref(ch); |
741 | |
742 | dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n" , |
743 | partid, ch_number); |
744 | |
745 | xpc_part_deref(part); |
746 | return 0; |
747 | } |
748 | |
749 | /* |
750 | * For each partition that XPC has established communications with, there is |
751 | * a minimum of one kernel thread assigned to perform any operation that |
752 | * may potentially sleep or block (basically the callouts to the asynchronous |
753 | * functions registered via xpc_connect()). |
754 | * |
755 | * Additional kthreads are created and destroyed by XPC as the workload |
756 | * demands. |
757 | * |
758 | * A kthread is assigned to one of the active channels that exists for a given |
759 | * partition. |
760 | */ |
761 | void |
762 | xpc_create_kthreads(struct xpc_channel *ch, int needed, |
763 | int ignore_disconnecting) |
764 | { |
765 | unsigned long irq_flags; |
766 | u64 args = XPC_PACK_ARGS(ch->partid, ch->number); |
767 | struct xpc_partition *part = &xpc_partitions[ch->partid]; |
768 | struct task_struct *kthread; |
769 | void (*indicate_partition_disengaged) (struct xpc_partition *) = |
770 | xpc_arch_ops.indicate_partition_disengaged; |
771 | |
772 | while (needed-- > 0) { |
773 | |
774 | /* |
775 | * The following is done on behalf of the newly created |
776 | * kthread. That kthread is responsible for doing the |
777 | * counterpart to the following before it exits. |
778 | */ |
779 | if (ignore_disconnecting) { |
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
781 | /* kthreads assigned had gone to zero */ |
782 | BUG_ON(!(ch->flags & |
783 | XPC_C_DISCONNECTINGCALLOUT_MADE)); |
784 | break; |
785 | } |
786 | |
787 | } else if (ch->flags & XPC_C_DISCONNECTING) { |
788 | break; |
789 | |
		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
792 | xpc_arch_ops.indicate_partition_engaged(part); |
793 | } |
794 | (void)xpc_part_ref(part); |
795 | xpc_msgqueue_ref(ch); |
796 | |
797 | kthread = kthread_run(xpc_kthread_start, (void *)args, |
798 | "xpc%02dc%d" , ch->partid, ch->number); |
		if (IS_ERR(kthread)) {
800 | /* the fork failed */ |
801 | |
802 | /* |
803 | * NOTE: if (ignore_disconnecting && |
804 | * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true, |
805 | * then we'll deadlock if all other kthreads assigned |
806 | * to this channel are blocked in the channel's |
807 | * registerer, because the only thing that will unblock |
808 | * them is the xpDisconnecting callout that this |
809 | * failed kthread_run() would have made. |
810 | */ |
811 | |
			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
814 | indicate_partition_disengaged(part); |
815 | } |
816 | xpc_msgqueue_deref(ch); |
817 | xpc_part_deref(part); |
818 | |
			if (atomic_read(&ch->kthreads_assigned) <
820 | ch->kthreads_idle_limit) { |
821 | /* |
822 | * Flag this as an error only if we have an |
823 | * insufficient #of kthreads for the channel |
824 | * to function. |
825 | */ |
826 | spin_lock_irqsave(&ch->lock, irq_flags); |
827 | XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources, |
828 | &irq_flags); |
829 | spin_unlock_irqrestore(lock: &ch->lock, flags: irq_flags); |
830 | } |
831 | break; |
832 | } |
833 | } |
834 | } |
835 | |
836 | void |
837 | xpc_disconnect_wait(int ch_number) |
838 | { |
839 | unsigned long irq_flags; |
840 | short partid; |
841 | struct xpc_partition *part; |
842 | struct xpc_channel *ch; |
843 | int wakeup_channel_mgr; |
844 | |
845 | /* now wait for all callouts to the caller's function to cease */ |
846 | for (partid = 0; partid < xp_max_npartitions; partid++) { |
847 | part = &xpc_partitions[partid]; |
848 | |
849 | if (!xpc_part_ref(part)) |
850 | continue; |
851 | |
852 | ch = &part->channels[ch_number]; |
853 | |
854 | if (!(ch->flags & XPC_C_WDISCONNECT)) { |
855 | xpc_part_deref(part); |
856 | continue; |
857 | } |
858 | |
859 | wait_for_completion(&ch->wdisconnect_wait); |
860 | |
861 | spin_lock_irqsave(&ch->lock, irq_flags); |
862 | DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); |
863 | wakeup_channel_mgr = 0; |
864 | |
865 | if (ch->delayed_chctl_flags) { |
866 | if (part->act_state != XPC_P_AS_DEACTIVATING) { |
				spin_lock(&part->chctl_lock);
868 | part->chctl.flags[ch->number] |= |
869 | ch->delayed_chctl_flags; |
				spin_unlock(&part->chctl_lock);
871 | wakeup_channel_mgr = 1; |
872 | } |
873 | ch->delayed_chctl_flags = 0; |
874 | } |
875 | |
876 | ch->flags &= ~XPC_C_WDISCONNECT; |
877 | spin_unlock_irqrestore(lock: &ch->lock, flags: irq_flags); |
878 | |
879 | if (wakeup_channel_mgr) |
880 | xpc_wakeup_channel_mgr(part); |
881 | |
882 | xpc_part_deref(part); |
883 | } |
884 | } |
885 | |
886 | static int |
887 | xpc_setup_partitions(void) |
888 | { |
889 | short partid; |
890 | struct xpc_partition *part; |
891 | |
	xpc_partitions = kcalloc(xp_max_npartitions,
				 sizeof(struct xpc_partition),
894 | GFP_KERNEL); |
895 | if (xpc_partitions == NULL) { |
896 | dev_err(xpc_part, "can't get memory for partition structure\n" ); |
897 | return -ENOMEM; |
898 | } |
899 | |
900 | /* |
901 | * The first few fields of each entry of xpc_partitions[] need to |
902 | * be initialized now so that calls to xpc_connect() and |
903 | * xpc_disconnect() can be made prior to the activation of any remote |
904 | * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE |
905 | * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING |
906 | * PARTITION HAS BEEN ACTIVATED. |
907 | */ |
908 | for (partid = 0; partid < xp_max_npartitions; partid++) { |
909 | part = &xpc_partitions[partid]; |
910 | |
911 | DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); |
912 | |
913 | part->activate_IRQ_rcvd = 0; |
914 | spin_lock_init(&part->act_lock); |
915 | part->act_state = XPC_P_AS_INACTIVE; |
916 | XPC_SET_REASON(part, 0, 0); |
917 | |
918 | timer_setup(&part->disengage_timer, |
919 | xpc_timeout_partition_disengage, 0); |
920 | |
921 | part->setup_state = XPC_P_SS_UNSET; |
922 | init_waitqueue_head(&part->teardown_wq); |
923 | atomic_set(v: &part->references, i: 0); |
924 | } |
925 | |
926 | return xpc_arch_ops.setup_partitions(); |
927 | } |
928 | |
929 | static void |
930 | xpc_teardown_partitions(void) |
931 | { |
932 | xpc_arch_ops.teardown_partitions(); |
	kfree(xpc_partitions);
934 | } |
935 | |
936 | static void |
937 | xpc_do_exit(enum xp_retval reason) |
938 | { |
939 | short partid; |
940 | int active_part_count, printed_waiting_msg = 0; |
941 | struct xpc_partition *part; |
942 | unsigned long printmsg_time, disengage_timeout = 0; |
943 | |
944 | /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ |
945 | DBUG_ON(xpc_exiting == 1); |
946 | |
947 | /* |
948 | * Let the heartbeat checker thread and the discovery thread |
949 | * (if one is running) know that they should exit. Also wake up |
950 | * the heartbeat checker thread in case it's sleeping. |
951 | */ |
952 | xpc_exiting = 1; |
953 | wake_up_interruptible(&xpc_activate_IRQ_wq); |
954 | |
955 | /* wait for the discovery thread to exit */ |
956 | wait_for_completion(&xpc_discovery_exited); |
957 | |
958 | /* wait for the heartbeat checker thread to exit */ |
959 | wait_for_completion(&xpc_hb_checker_exited); |
960 | |
961 | /* sleep for a 1/3 of a second or so */ |
	(void)msleep_interruptible(300);
963 | |
964 | /* wait for all partitions to become inactive */ |
965 | |
966 | printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ); |
967 | xpc_disengage_timedout = 0; |
968 | |
969 | do { |
970 | active_part_count = 0; |
971 | |
972 | for (partid = 0; partid < xp_max_npartitions; partid++) { |
973 | part = &xpc_partitions[partid]; |
974 | |
975 | if (xpc_partition_disengaged(part) && |
976 | part->act_state == XPC_P_AS_INACTIVE) { |
977 | continue; |
978 | } |
979 | |
980 | active_part_count++; |
981 | |
982 | XPC_DEACTIVATE_PARTITION(part, reason); |
983 | |
984 | if (part->disengage_timeout > disengage_timeout) |
985 | disengage_timeout = part->disengage_timeout; |
986 | } |
987 | |
988 | if (xpc_arch_ops.any_partition_engaged()) { |
989 | if (time_is_before_jiffies(printmsg_time)) { |
990 | dev_info(xpc_part, "waiting for remote " |
991 | "partitions to deactivate, timeout in " |
992 | "%ld seconds\n" , (disengage_timeout - |
993 | jiffies) / HZ); |
994 | printmsg_time = jiffies + |
995 | (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ); |
996 | printed_waiting_msg = 1; |
997 | } |
998 | |
999 | } else if (active_part_count > 0) { |
1000 | if (printed_waiting_msg) { |
1001 | dev_info(xpc_part, "waiting for local partition" |
1002 | " to deactivate\n" ); |
1003 | printed_waiting_msg = 0; |
1004 | } |
1005 | |
1006 | } else { |
1007 | if (!xpc_disengage_timedout) { |
1008 | dev_info(xpc_part, "all partitions have " |
1009 | "deactivated\n" ); |
1010 | } |
1011 | break; |
1012 | } |
1013 | |
1014 | /* sleep for a 1/3 of a second or so */ |
		(void)msleep_interruptible(300);
1016 | |
1017 | } while (1); |
1018 | |
1019 | DBUG_ON(xpc_arch_ops.any_partition_engaged()); |
1020 | |
1021 | xpc_teardown_rsvd_page(); |
1022 | |
1023 | if (reason == xpUnloading) { |
		(void)unregister_die_notifier(&xpc_die_notifier);
1025 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); |
1026 | } |
1027 | |
1028 | /* clear the interface to XPC's functions */ |
1029 | xpc_clear_interface(); |
1030 | |
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);
	if (xpc_sysctl_hb)
		unregister_sysctl_table(xpc_sysctl_hb);
1035 | |
1036 | xpc_teardown_partitions(); |
1037 | |
1038 | if (is_uv_system()) |
1039 | xpc_exit_uv(); |
1040 | } |
1041 | |
1042 | /* |
1043 | * This function is called when the system is being rebooted. |
1044 | */ |
1045 | static int |
1046 | xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) |
1047 | { |
1048 | enum xp_retval reason; |
1049 | |
1050 | switch (event) { |
1051 | case SYS_RESTART: |
1052 | reason = xpSystemReboot; |
1053 | break; |
1054 | case SYS_HALT: |
1055 | reason = xpSystemHalt; |
1056 | break; |
1057 | case SYS_POWER_OFF: |
1058 | reason = xpSystemPoweroff; |
1059 | break; |
1060 | default: |
1061 | reason = xpSystemGoingDown; |
1062 | } |
1063 | |
1064 | xpc_do_exit(reason); |
1065 | return NOTIFY_DONE; |
1066 | } |
1067 | |
1068 | /* Used to only allow one cpu to complete disconnect */ |
1069 | static unsigned int xpc_die_disconnecting; |
1070 | |
1071 | /* |
1072 | * Notify other partitions to deactivate from us by first disengaging from all |
1073 | * references to our memory. |
1074 | */ |
1075 | static void |
1076 | xpc_die_deactivate(void) |
1077 | { |
1078 | struct xpc_partition *part; |
1079 | short partid; |
1080 | int any_engaged; |
1081 | long keep_waiting; |
1082 | long wait_to_print; |
1083 | |
1084 | if (cmpxchg(&xpc_die_disconnecting, 0, 1)) |
1085 | return; |
1086 | |
1087 | /* keep xpc_hb_checker thread from doing anything (just in case) */ |
1088 | xpc_exiting = 1; |
1089 | |
	xpc_arch_ops.disallow_all_hbs();	/* indicate we're deactivated */
1091 | |
1092 | for (partid = 0; partid < xp_max_npartitions; partid++) { |
1093 | part = &xpc_partitions[partid]; |
1094 | |
1095 | if (xpc_arch_ops.partition_engaged(partid) || |
1096 | part->act_state != XPC_P_AS_INACTIVE) { |
1097 | xpc_arch_ops.request_partition_deactivation(part); |
1098 | xpc_arch_ops.indicate_partition_disengaged(part); |
1099 | } |
1100 | } |
1101 | |
1102 | /* |
1103 | * Though we requested that all other partitions deactivate from us, |
1104 | * we only wait until they've all disengaged or we've reached the |
1105 | * defined timelimit. |
1106 | * |
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds (the udelay() at the bottom of the
	 * loop), there are about 5000 loops per second; hence the '* 1000 * 5'
	 * factors below that convert seconds into the #of loops to take
	 * before bailing and the #of loops before printing a waiting message.
1110 | */ |
1111 | keep_waiting = xpc_disengage_timelimit * 1000 * 5; |
1112 | wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5; |
1113 | |
1114 | while (1) { |
1115 | any_engaged = xpc_arch_ops.any_partition_engaged(); |
1116 | if (!any_engaged) { |
1117 | dev_info(xpc_part, "all partitions have deactivated\n" ); |
1118 | break; |
1119 | } |
1120 | |
1121 | if (!keep_waiting--) { |
1122 | for (partid = 0; partid < xp_max_npartitions; |
1123 | partid++) { |
1124 | if (xpc_arch_ops.partition_engaged(partid)) { |
1125 | dev_info(xpc_part, "deactivate from " |
1126 | "remote partition %d timed " |
1127 | "out\n" , partid); |
1128 | } |
1129 | } |
1130 | break; |
1131 | } |
1132 | |
1133 | if (!wait_to_print--) { |
1134 | dev_info(xpc_part, "waiting for remote partitions to " |
1135 | "deactivate, timeout in %ld seconds\n" , |
1136 | keep_waiting / (1000 * 5)); |
1137 | wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * |
1138 | 1000 * 5; |
1139 | } |
1140 | |
1141 | udelay(200); |
1142 | } |
1143 | } |
1144 | |
1145 | /* |
1146 | * This function is called when the system is being restarted or halted due |
1147 | * to some sort of system failure. If this is the case we need to notify the |
1148 | * other partitions to disengage from all references to our memory. |
1149 | * This function can also be called when our heartbeater could be offlined |
1150 | * for a time. In this case we need to notify other partitions to not worry |
1151 | * about the lack of a heartbeat. |
1152 | */ |
1153 | static int |
1154 | xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) |
1155 | { |
1156 | struct die_args *die_args = _die_args; |
1157 | |
1158 | switch (event) { |
1159 | case DIE_TRAP: |
1160 | if (die_args->trapnr == X86_TRAP_DF) |
1161 | xpc_die_deactivate(); |
1162 | |
1163 | if (((die_args->trapnr == X86_TRAP_MF) || |
1164 | (die_args->trapnr == X86_TRAP_XF)) && |
		    !user_mode(die_args->regs))
1166 | xpc_die_deactivate(); |
1167 | |
1168 | break; |
1169 | case DIE_INT3: |
1170 | case DIE_DEBUG: |
1171 | break; |
1172 | case DIE_OOPS: |
1173 | case DIE_GPF: |
1174 | default: |
1175 | xpc_die_deactivate(); |
1176 | } |
1177 | |
1178 | return NOTIFY_DONE; |
1179 | } |
1180 | |
1181 | static int __init |
1182 | xpc_init(void) |
1183 | { |
1184 | int ret; |
1185 | struct task_struct *kthread; |
1186 | |
	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");
1189 | |
1190 | if (is_uv_system()) { |
1191 | ret = xpc_init_uv(); |
1192 | |
1193 | } else { |
1194 | ret = -ENODEV; |
1195 | } |
1196 | |
1197 | if (ret != 0) |
1198 | return ret; |
1199 | |
1200 | ret = xpc_setup_partitions(); |
1201 | if (ret != 0) { |
1202 | dev_err(xpc_part, "can't get memory for partition structure\n" ); |
1203 | goto out_1; |
1204 | } |
1205 | |
1206 | xpc_sysctl = register_sysctl("xpc" , xpc_sys_xpc); |
1207 | xpc_sysctl_hb = register_sysctl("xpc/hb" , xpc_sys_xpc_hb); |
1208 | |
1209 | /* |
1210 | * Fill the partition reserved page with the information needed by |
1211 | * other partitions to discover we are alive and establish initial |
1212 | * communications. |
1213 | */ |
1214 | ret = xpc_setup_rsvd_page(); |
1215 | if (ret != 0) { |
1216 | dev_err(xpc_part, "can't setup our reserved page\n" ); |
1217 | goto out_2; |
1218 | } |
1219 | |
1220 | /* add ourselves to the reboot_notifier_list */ |
1221 | ret = register_reboot_notifier(&xpc_reboot_notifier); |
1222 | if (ret != 0) |
1223 | dev_warn(xpc_part, "can't register reboot notifier\n" ); |
1224 | |
1225 | /* add ourselves to the die_notifier list */ |
	ret = register_die_notifier(&xpc_die_notifier);
1227 | if (ret != 0) |
1228 | dev_warn(xpc_part, "can't register die notifier\n" ); |
1229 | |
1230 | /* |
1231 | * The real work-horse behind xpc. This processes incoming |
1232 | * interrupts and monitors remote heartbeats. |
1233 | */ |
1234 | kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME); |
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
1237 | ret = -EBUSY; |
1238 | goto out_3; |
1239 | } |
1240 | |
1241 | /* |
1242 | * Startup a thread that will attempt to discover other partitions to |
1243 | * activate based on info provided by SAL. This new thread is short |
1244 | * lived and will exit once discovery is complete. |
1245 | */ |
1246 | kthread = kthread_run(xpc_initiate_discovery, NULL, |
1247 | XPC_DISCOVERY_THREAD_NAME); |
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");
1250 | |
1251 | /* mark this new thread as a non-starter */ |
1252 | complete(&xpc_discovery_exited); |
1253 | |
		xpc_do_exit(xpUnloading);
1255 | return -EBUSY; |
1256 | } |
1257 | |
1258 | /* set the interface to point at XPC's functions */ |
1259 | xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, |
1260 | xpc_initiate_send, xpc_initiate_send_notify, |
1261 | xpc_initiate_received, xpc_initiate_partid_to_nasids); |
1262 | |
1263 | return 0; |
1264 | |
1265 | /* initialization was not successful */ |
1266 | out_3: |
1267 | xpc_teardown_rsvd_page(); |
1268 | |
	(void)unregister_die_notifier(&xpc_die_notifier);
1270 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); |
1271 | out_2: |
	if (xpc_sysctl_hb)
		unregister_sysctl_table(xpc_sysctl_hb);
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);
1276 | |
1277 | xpc_teardown_partitions(); |
1278 | out_1: |
1279 | if (is_uv_system()) |
1280 | xpc_exit_uv(); |
1281 | return ret; |
1282 | } |
1283 | |
1284 | module_init(xpc_init); |
1285 | |
1286 | static void __exit |
1287 | xpc_exit(void) |
1288 | { |
	xpc_do_exit(xpUnloading);
1290 | } |
1291 | |
1292 | module_exit(xpc_exit); |
1293 | |
1294 | MODULE_AUTHOR("Silicon Graphics, Inc." ); |
1295 | MODULE_DESCRIPTION("Cross Partition Communication (XPC) support" ); |
1296 | MODULE_LICENSE("GPL" ); |
1297 | |
1298 | module_param(xpc_hb_interval, int, 0); |
1299 | MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " |
1300 | "heartbeat increments." ); |
1301 | |
1302 | module_param(xpc_hb_check_interval, int, 0); |
1303 | MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " |
1304 | "heartbeat checks." ); |
1305 | |
1306 | module_param(xpc_disengage_timelimit, int, 0); |
1307 | MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait " |
1308 | "for disengage to complete." ); |
1309 | |
1310 | module_param(xpc_kdebug_ignore, int, 0); |
1311 | MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " |
1312 | "other partitions when dropping into kdebug." ); |
1313 | |