// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s):	Martin Peschke <mpeschke@de.ibm.com>
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
#include <asm/debug.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

struct sclp_trace_entry {
	char id[4] __nonstring;
	u32 a;
	u64 b;
};

#define SCLP_TRACE_ENTRY_SIZE		sizeof(struct sclp_trace_entry)
#define SCLP_TRACE_MAX_SIZE		128
#define SCLP_TRACE_EVENT_MAX_SIZE	64

/* Debug trace area intended for all entries in abbreviated form. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
			 &debug_hex_ascii_view);

/* Error trace area intended for full entries relating to failed requests. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
			 SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
bool sclp_console_drop = true;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;

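/*
 * Add an abbreviated trace entry to the regular debug area and, for entries
 * relating to errors, also to the error debug area.
 */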
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
	struct sclp_trace_entry e;

	memset(&e, 0, sizeof(e));
	strtomem(e.id, id);
	e.a = a;
	e.b = b;
	debug_event(&sclp_debug, prio, &e, sizeof(e));
	if (err)
		debug_event(&sclp_debug_err, 0, &e, sizeof(e));
}

static inline int no_zeroes_len(void *data, int len)
{
	char *d = data;

	/* Minimize trace area usage by not tracing trailing zeroes. */
	while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
		len--;

	return len;
}

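/* Trace binary data, dropping trailing zeroes to conserve trace space. */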
static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
{
	debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
	if (errlen)
		debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
}

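/*
 * Determine the number of SCCB bytes to trace: the full SCCB at maximum
 * debug level, a minimal entry for console writes, and a capped length
 * otherwise.
 */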
static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
{
	struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
	int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;

	/* Full SCCB tracing if debug level is set to max. */
	if (sclp_debug.level == DEBUG_MAX_LEVEL)
		return len;

	/* Minimal tracing for console writes. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
	    (evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG))
		limit = SCLP_TRACE_ENTRY_SIZE;

	return min(len, limit);
}

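/* Trace an SCCB-related event, including an abbreviated dump of the SCCB. */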
static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
				   sclp_cmdw_t cmd, struct sccb_header *sccb,
				   bool err)
{
	sclp_trace(prio, id, a, b, err);
	if (sccb) {
		sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
			       err ? sccb->length : 0);
	}
}

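/* Trace an event buffer, capping the traced length unless an error occurred. */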
static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
				    struct evbuf_header *evbuf, bool err)
{
	sclp_trace(prio, id, a, b, err);
	sclp_trace_bin(prio + 1, evbuf,
		       min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
		       err ? evbuf->length : 0);
}

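/*
 * Trace the state of a request: status, response code, queue timeout and
 * start count are packed into the 64-bit trace data field.
 */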
static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
				  bool err)
{
	struct sccb_header *sccb = req->sccb;
	union {
		struct {
			u16 status;
			u16 response;
			u16 timeout;
			u16 start_count;
		};
		u64 b;
	} summary;

	summary.status = req->status;
	summary.response = sccb ? sccb->response_code : 0;
	summary.timeout = (u16)req->queue_timeout;
	summary.start_count = (u16)req->start_count;

	sclp_trace(prio, id, __pa(sccb), summary.b, err);
}

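/* Trace an event-listener registration or unregistration with its masks. */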
static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
				       struct sclp_register *reg)
{
	struct {
		u64 receive;
		u64 send;
	} d;

	d.receive = reg->receive_mask;
	d.send = reg->send_mask;

	sclp_trace(prio, id, a, b, false);
	sclp_trace_bin(prio, &d, sizeof(d), 0);
}

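/* Handle the "sclp_con_pages=" kernel parameter; values below the default
 * are ignored. */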
static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

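/* Handle the "sclp_con_drop=" kernel parameter. */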
static int __init sclp_setup_console_drop(char *str)
{
	return kstrtobool(str, &sclp_console_drop) == 0;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

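/* Queue a read event data request if none is pending yet. Called while
 * sclp_lock is locked. */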
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	/* TMO: A timeout occurred (a=force_restart) */
	sclp_trace(2, "TMO", force_restart, 0, true);

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock held.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();

		if (req) {
			/* RQTM: Request timed out (a=sccb, b=summary) */
			sclp_trace_req(2, "RQTM", req, true);
		}

		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

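/* Issue a service call and trace it before and after; remember the command
 * as the active one on success. */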
static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
{
	static u64 srvc_count;
	int rc;

	/* SRV1: Service call about to be issued (a=command, b=sccb address) */
	sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);

	rc = sclp_service_call(command, sccb);

	/* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
	sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);

	if (rc == 0)
		active_cmd = command;

	return rc;
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call_trace(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);

		/* RQAB: Request aborted (a=sccb, b=summary) */
		sclp_trace_req(2, "RQAB", req, true);

		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

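/* Check whether a request may be queued: the init request is always allowed,
 * all others only while the driver is initialized and active. */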
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_init_req)
		return 1;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}

	/* RQAD: Request was added (a=sccb, b=caller) */
	sclp_trace(2, "RQAD", __pa(req->sccb), _RET_IP_, false);

	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}

		/* EVNT: Event callback (b=receiver) */
		sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
				 evbuf, !reg);

		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == __pa(req->sccb))
			return req;
	}
	return NULL;
}

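/* Check whether an SCCB completed successfully: the response code must
 * indicate success and, for outgoing events, the event-processed flag must
 * be set. */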
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
	struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
	struct evbuf_header *evbuf;
	u16 response;

	if (!sccb)
		return true;

	/* Check SCCB response. */
	response = sccb->response_code & 0xff;
	if (response != 0x10 && response != 0x20)
		return false;

	/* Check event-processed flag on outgoing events. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
		evbuf = (struct evbuf_header *)(sccb + 1);
		if (!(evbuf->flags & 0x80))
			return false;
	}

	return true;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;

	/* INT: Interrupt received (a=intparm, b=cmd) */
	sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
			(struct sccb_header *)__va(finished_sccb),
			!ok_response(finished_sccb, active_cmd));

	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;

			/* RQOK: Request success (a=sccb, b=summary) */
			sclp_trace_req(2, "RQOK", req, false);

			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		} else {
			/* UNEX: Unexpected SCCB completion (a=sccb address) */
			sclp_trace(0, "UNEX", finished_sccb, 0, true);
		}
		sclp_running_state = sclp_running_state_idle;
		active_cmd = 0;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	struct ctlreg cr0, cr0_sync;
	unsigned long flags;
	static u64 sync_count;
	u64 timeout;
	int irq_context;

	/* SYN1: Synchronous wait start (a=runstate, b=sync count) */
	sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	local_ctl_store(0, &cr0);
	cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync.val |= 1UL << (63 - 54);
	local_ctl_load(0, &cr0_sync);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	local_ctl_load(0, &cr0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);

	/* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
	sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn) {
			/* STCG: State-change callback (b=callback) */
			sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
				   false);

			reg->state_change_fn(reg);
		}
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	u8		masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8 sclp_receive_mask[mask_length];
	 * u8 sclp_send_mask[mask_length];
	 * u32 read_data_function_mask;
	 */
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	/* REG: Event listener registered (b=caller) */
	sclp_trace_register(2, "REG", 0, _RET_IP_, reg);

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	/* UREG: Event listener unregistered (b=caller) */
	sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != __pa(sclp_init_sccb))
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call_trace(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

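/* Driver attributes for the sclp platform driver, exported via sysfs. */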
static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sysfs_emit(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_store(struct device_driver *dev, const char *buf, size_t count)
{
	int rc;

	rc = kstrtobool(buf, &sclp_console_drop);
	return rc ?: count;
}

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sysfs_emit(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RW(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sysfs_emit(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.groups = sclp_drv_attr_groups,
	},
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
	/* Set up variables */
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

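/* Register the sclp platform driver and perform core initialization at
 * arch_initcall time. */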
static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	return sclp_init();
}

arch_initcall(sclp_initcall);