// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/*
 * some AP queue helper functions
 */

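/*
 * The following helpers reflect the queue's card facility bits:
 * SE bind/unbind is supported on EP11 and accelerator queues,
 * association only on EP11 queues, and a bind is actually required
 * only when the SE bind facility (ap_sb_available()) is present.
 */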
static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
	       ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
}

static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
}

static inline bool ap_q_needs_bind(struct ap_queue *aq)
{
	return ap_q_supports_bind(aq) && ap_sb_available();
}

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on the AP queue via ap_aqic(). Based on the
 * return value the caller waits a while and tests the AP queue with
 * ap_test_queue() whether interrupts have been switched on.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, msglen);
}

/* State machine definitions and helpers */

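/*
 * ap_sm_nop(): No-operation state machine function.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE.
 */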
static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status of the last DQAP invocation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_HIGH_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the minimum (i.e. most urgent) of the wait hints returned
 * by ap_sm_read() and ap_sm_write().
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->rapq_fbit = 0;
		aq->se_bound = false;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *	association request.
 * @aq: pointer to the AP queue
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_test_queue(aq->qid, 1, &info);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* check bs bits */
	switch (info.bs) {
	case AP_BS_Q_USABLE:
		/* association is complete */
		aq->sm_state = AP_SM_STATE_IDLE;
		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
			   __func__, AP_QID_CARD(aq->qid),
			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, info.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}


/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

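/*
 * ap_sm_event(): Drive the AP queue state machine for one event.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Dispatches through the jump table if the queue is configured, not
 * checkstopped and initialized; otherwise returns AP_SM_WAIT_NONE.
 */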
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

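/*
 * ap_sm_event_loop(): Run ap_sm_event() until the state machine no
 * longer requests an immediate re-poll.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Returns the first wait hint that is not AP_SM_WAIT_AGAIN.
 */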
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return sysfs_emit(buf, "%llu\n", req_cnt);
	else
		return sysfs_emit(buf, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = sysfs_emit(buf, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = sysfs_emit(buf, "Reset Timer armed.\n");
		break;
	default:
		rc = sysfs_emit(buf, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	} else {
		status = ap_tapq(aq->qid, NULL);
		if (status.irq_enabled)
			rc = sysfs_emit(buf, "Interrupts enabled.\n");
		else
			rc = sysfs_emit(buf, "Interrupts disabled.\n");
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_test_queue(aq->qid, 1, &info);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", info.fac);
}

static DEVICE_ATTR_RO(ap_functions);

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

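/*
 * se_bind_show(): Show the SE bind state of this queue: "-" if bind
 * is not supported, otherwise "bound" or "unbound" derived from the
 * bs bits returned by TAPQ.
 */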
static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &info);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	switch (info.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (value) {
		/* bind, do BAPQ */
		spin_lock_bh(&aq->lock);
		if (aq->sm_state < AP_SM_STATE_IDLE) {
			spin_unlock_bh(&aq->lock);
			return -EBUSY;
		}
		status = ap_bapq(aq->qid);
		spin_unlock_bh(&aq->lock);
		if (!status.response_code) {
			aq->se_bound = true;
			AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
				    AP_QID_CARD(aq->qid),
				    AP_QID_QUEUE(aq->qid));
		} else {
			AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
				    __func__, status.response_code,
				    AP_QID_CARD(aq->qid),
				    AP_QID_QUEUE(aq->qid));
			return -EIO;
		}
	} else {
		/* unbind, set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->sm_state = AP_SM_STATE_RESET_START;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
	}

	return count;
}

static DEVICE_ATTR_RW(se_bind);

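/*
 * se_associate_show(): Show the SE association state of this queue
 * ("-", "associated <idx>", "association pending" or "unassociated"),
 * derived from the bs bits returned by TAPQ and the stored assoc_idx.
 */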
static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &info);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	switch (info.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	spin_lock_bh(&aq->lock);

	/* sm should be in idle state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		spin_unlock_bh(&aq->lock);
		return -EBUSY;
	}

	/* already associated or association pending ? */
	if (aq->assoc_idx != ASSOC_IDX_INVALID) {
		spin_unlock_bh(&aq->lock);
		return -EINVAL;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
		break;
	default:
		spin_unlock_bh(&aq->lock);
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return count;
}

static DEVICE_ATTR_RW(se_associate);

static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

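/**
 * ap_queue_create(): Allocate and initialize an AP queue device.
 * @qid: The AP queue number
 * @device_type: The device type of the owning card
 *
 * Returns the new struct ap_queue or NULL on allocation failure.
 */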
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	/* add optional SE secure binding attributes group */
	if (ap_sb_available() && is_prot_virt_guest())
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

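/**
 * ap_queue_init_reply(): Attach a reply buffer to an AP queue and
 *	kick the state machine.
 * @aq: The AP queue
 * @reply: The message buffer used for receiving replies
 */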
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_queue_usable(): Check if queue is usable just now.
 * @aq: The AP queue device to test for usability.
 *
 * This function is intended for the scheduler to query if it makes
 * sense to enqueue a message into this AP queue device by calling
 * ap_queue_message(). The perspective is very short-term as the
 * state machine and device state(s) may change at any time.
 */
bool ap_queue_usable(struct ap_queue *aq)
{
	bool rc = true;

	spin_lock_bh(&aq->lock);

	/* check for not configured or checkstopped */
	if (!aq->config || aq->chkstop) {
		rc = false;
		goto unlock_and_out;
	}

	/* device state needs to be ok */
	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
		rc = false;
		goto unlock_and_out;
	}

	/* SE guest's queues additionally need to be bound */
	if (ap_q_needs_bind(aq) && !aq->se_bound)
		rc = false;

unlock_and_out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
EXPORT_SYMBOL(ap_queue_usable);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

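/**
 * ap_flush_queue(): Flush requests under the queue lock.
 * @aq: Pointer to the AP queue
 */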
void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

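/**
 * ap_queue_prepare_remove(): Prepare an AP queue for removal: flush
 *	all pending and queued requests, move the device state to
 *	SHUTDOWN and stop the request timeout timer.
 * @aq: Pointer to the AP queue
 */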
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

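/**
 * ap_queue_remove(): Final step of AP queue removal.
 * @aq: Pointer to the AP queue
 */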
void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

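/**
 * _ap_queue_init_state(): (Re-)set an AP queue to its initial
 *	operating state and trigger a reset. The caller must hold
 *	the queue lock; ap_queue_init_state() is the locked variant.
 * @aq: Pointer to the AP queue
 */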
void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);

