// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}
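
/*
 * Note that a detach request does not carry a resource handle; the
 * release->rsrc_handle field is left untouched here and is filled in
 * per RHT entry by the detach path (see _cxlflash_disk_detach()).
 */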

/**
 * marshal_udir_to_rele() - translate udirect to release structure
 * @udirect:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
				 struct dk_cxlflash_release *release)
{
	release->hdr = udirect->hdr;
	release->context_id = udirect->context_id;
	release->rsrc_handle = udirect->rsrc_handle;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 *
 * Note that the main loop in this routine will always execute at least once
 * to flush the reset_waitq.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found = true;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		found = false;
	}
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi threaded. The tgid remains constant for the process and only changes
 * when the process forks. For all intents and purposes, think of tgid
 * as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = task_tgid_nr(current), ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = task_ppid_nr(current);

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}

/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}
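
/*
 * Illustrative pairing with get_context(), mirroring callers such as
 * _cxlflash_disk_release() (sketch only):
 *
 *	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
 *	if (unlikely(!ctxi))
 *		return -EINVAL;
 *	... operate on the context with ctxi->mutex held ...
 *	put_context(ctxi);
 */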

/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 val;
	int i;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llx\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Set up the LISN effective address for each interrupt */
		for (i = 0; i < ctxi->irqs; i++) {
			val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
			writeq_be(val, &ctrl_map->lisn_ea[i]);
		}

		/* Use primary HWQ PASID as identifier for all interrupts */
		val = hwq->ctx_hndl;
		writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
		writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
 * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of
 * the recovery, the handler drains all currently running ioctls, waiting until
 * they have completed before proceeding with a reset. As this routine is used
 * on the ioctl path, this can create a condition where the EEH handler becomes
 * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
 * temporarily unmark this thread as an ioctl thread by releasing the ioctl
 * read semaphore. This will allow the EEH handler to proceed with a recovery
 * while this thread is still running. Once the scsi_execute_cmd() returns,
 * reacquire the ioctl read semaphore and check the adapter state in case it
 * changed while inside of scsi_execute_cmd(). The state check will wait if the
 * adapter is still being recovered or return a failure if the recovery failed.
 * In the event that the adapter reset failed, simply return the failure as the
 * ioctl would be unable to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
	};
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf,
				  CMD_BUFSIZE, to, CMD_RETRIES, &exec_args);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state result=%08x\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (result > 0 && scsi_sense_valid(&sshdr)) {
		if (result & SAM_STAT_CHECK_CONDITION) {
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					fallthrough;
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=%08x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer; the max LBA
	 * resides in bytes 0-7 and the block length in bytes 8-11 of the
	 * READ CAPACITY(16) parameter data. Note that we don't need to worry
	 * about unaligned access as the buffer is allocated on an aligned
	 * boundary.
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}

/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		dev_dbg(dev, "%s: Context does not have allocated RHT\n",
			__func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
			__func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
	return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}
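
/*
 * rhte_checkout() and rhte_checkin() must balance; ctxi->rht_out tracks
 * the number of outstanding entries and lets the detach path stop
 * scanning the table early (see _cxlflash_disk_detach()).
 */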

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must be populated in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}
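
/*
 * The teardown in _cxlflash_disk_release() follows the same SISLite
 * synchronization discipline in the opposite direction: the valid bit
 * is revoked first, then the LUN id, then the remaining dword, with a
 * dma_wmb() making each step visible before the next.
 */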

/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: gli_mode=%d requested_mode=%d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}

/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	int rcr = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize virtual LUNs to 0 by setting their size to 0. This
	 * clears the LXT_START and LXT_CNT fields in the RHT entry
	 * and properly syncs with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active) {
			rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
			if (unlikely(rcr))
				dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
					__func__, rcr);
		}
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}

/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained context cookie.
 * @ctxid:	Previously obtained process element associated with CXL context.
 * @file:	Previously obtained file associated with CXL context.
 * @perms:	User-specified permissions.
 * @irqs:	User-specified number of interrupts.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
			 void *ctx, int ctxid, struct file *file, u32 perms,
			 u64 irqs)
{
	struct afu *afu = cfg->afu;

	ctxi->rht_perms = perms;
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->irqs = irqs;
	ctxi->pid = task_tgid_nr(current); /* tgid = pid */
	ctxi->ctx = ctx;
	ctxi->cfg = cfg;
	ctxi->file = file;
	ctxi->initialized = true;
	mutex_init(&ctxi->mutex);
	kref_init(&ctxi->kref);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
}

/**
 * remove_context() - context kref release handler
 * @kref:	Kernel reference associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
	struct cxlflash_cfg *cfg = ctxi->cfg;
	u64 ctxid = DECODE_CTXID(ctxi->ctxid);

	/* Remove context from table/error list */
	WARN_ON(!mutex_is_locked(&ctxi->mutex));
	ctxi->unavail = true;
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);

	if (!list_empty(&ctxi->list))
		list_del(&ctxi->list);
	cfg->ctx_tbl[ctxid] = NULL;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	mutex_unlock(&ctxi->mutex);

	/* Context now completely uncoupled/unreachable */
	destroy_context(cfg, ctxi);
}

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;

	int i;
	int rc = 0;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/*
	 * Release the context reference and the sdev reference that
	 * bound this LUN to the context.
	 */
	if (kref_put(&ctxi->kref, remove_context))
		put_ctx = false;
	scsi_device_put(sdev);
out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. The user should be aware that explicitly performing a close
 * is considered catastrophic and subsequent usage of the superpipe API
 * with previously saved off tokens will fail.
 *
 * This routine derives the context reference and calls detach for
 * each LUN associated with the context. The final detach operation
 * causes the context itself to be freed. With exception to when the
 * CXL process element (context id) lookup fails (a case that should
 * theoretically never occur), every call into this routine results
 * in a complete freeing of a context.
 *
 * Detaching the LUN is typically an ioctl() operation and the underlying
 * code assumes that ioctl_rwsem has been acquired as a reader. To support
 * that design point, the semaphore is acquired and released around detach.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: ctxid=%d already free\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

	down_read(&cfg->ioctl_rwsem);
	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
	up_read(&cfg->ioctl_rwsem);
out_release:
	cfg->ops->fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */
static void unmap_context(struct ctx_info *ctxi)
{
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}
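
/*
 * Zapping every PTE of the file mapping (even_cows=1) forces the next
 * user access to fault into cxlflash_mmap_fault() below, where either
 * the real mapping or the error notification page is installed.
 */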

/**
 * get_err_page() - obtains and allocates the error notification page
 * @cfg:	Internal structure associated with the host.
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
	struct page *err_page = global.err_page;
	struct device *dev = &cfg->dev->dev;

	if (unlikely(!err_page)) {
		err_page = alloc_page(GFP_KERNEL);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Unable to allocate err_page\n",
				__func__);
			goto out;
		}

		memset(page_address(err_page), -1, PAGE_SIZE);

		/* Serialize update w/ other threads to avoid a leak */
		mutex_lock(&global.mutex);
		if (likely(!global.err_page))
			global.err_page = err_page;
		else {
			__free_page(err_page);
			err_page = global.err_page;
		}
		mutex_unlock(&global.mutex);
	}

out:
	dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
	return err_page;
}

/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	vm_fault_t rc = 0;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page\n",
			__func__);

		err_page = get_err_page(cfg);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not get err_page\n", __func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

	rc = cfg->ops->fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}

const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};
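
/*
 * These fops wrap the file descriptor handed out by the CXL services:
 * mmap() is intercepted by cxlflash_cxl_mmap() to install the local
 * fault handler and close() by cxlflash_cxl_release() to tear down the
 * context.
 */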

/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	return rc;
}

/*
 * Dummy NULL fops
 */
static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};

/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used in process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
int check_state(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
		up_read(&cfg->ioctl_rwsem);
		rc = wait_event_interruptible(cfg->reset_waitq,
					      cfg->state != STATE_RESET);
		down_read(&cfg->ioctl_rwsem);
		if (unlikely(rc))
			break;
		goto retry;
	case STATE_FAILTERM:
		dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}
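
/*
 * Typical usage around a lengthy call on the ioctl path, as in
 * read_cap16() (sketch only):
 *
 *	up_read(&cfg->ioctl_rwsem);
 *	... lengthy operation, e.g. scsi_execute_cmd() ...
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = check_state(cfg);
 */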

/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 irqs = attach->num_interrupts;
	u64 flags = 0UL;
	u64 rctxid = 0UL;
	struct file *file = NULL;

	void *ctx = NULL;

	int fd = -1;

	if (irqs > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, irqs);
		rc = -EINVAL;
		goto out;
	}

	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device rc=%d\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse (another context reference) */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
			__func__, rctxid);
		kref_get(&ctxi->kref);
		list_add(&lun_access->list, &ctxi->luns);
		goto out_attach;
	}

	ctxi = create_context(cfg);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
			__func__, ctxid);
		rc = -ENOMEM;
		goto err;
	}

	ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err;
	}

	rc = cfg->ops->start_work(ctx, irqs);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err;
	}

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err;
	}

	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	/* Initialize the context; no one else can reference it yet */
	init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	if (fd != -1)
		flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
	if (afu_is_sq_cmd_mode(afu))
		flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

	attach->hdr.return_flags = flags;
	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

out:
	attach->adap_fd = fd;

	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err:
	/* Cleanup CXL context; okay to 'stop' even if it was not started */
	if (!IS_ERR_OR_NULL(ctx)) {
		cfg->ops->stop_context(ctx);
		cfg->ops->release_context(ctx);
		ctx = NULL;
	}

	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which will cause us to mistakenly
	 * call into the CXL code. Rather than try to add yet more complexity
	 * to that routine (cxlflash_cxl_release) we should try to fix the
	 * issue here.
	 */
	if (fd > 0) {
		file->f_op = &null_fops;
		fput(file);
		put_unused_fd(fd);
		fd = -1;
		file = NULL;
	}

	/* Cleanup our context */
	if (ctxi) {
		destroy_context(cfg, ctxi);
		ctxi = NULL;
	}

	kfree(lun_access);
	scsi_device_put(sdev);
	goto out;
}
1523 | |
1524 | /** |
1525 | * recover_context() - recovers a context in error |
1526 | * @cfg: Internal structure associated with the host. |
1527 | * @ctxi: Context to release. |
1528 | * @adap_fd: Adapter file descriptor associated with new/recovered context. |
1529 | * |
1530 | * Restablishes the state for a context-in-error. |
1531 | * |
1532 | * Return: 0 on success, -errno on failure |
1533 | */ |
1534 | static int recover_context(struct cxlflash_cfg *cfg, |
1535 | struct ctx_info *ctxi, |
1536 | int *adap_fd) |
1537 | { |
1538 | struct device *dev = &cfg->dev->dev; |
1539 | int rc = 0; |
1540 | int fd = -1; |
1541 | int ctxid = -1; |
1542 | struct file *file; |
1543 | void *ctx; |
1544 | struct afu *afu = cfg->afu; |
1545 | |
1546 | ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); |
1547 | if (IS_ERR_OR_NULL(ptr: ctx)) { |
1548 | dev_err(dev, "%s: Could not initialize context %p\n" , |
1549 | __func__, ctx); |
1550 | rc = -ENODEV; |
1551 | goto out; |
1552 | } |
1553 | |
1554 | rc = cfg->ops->start_work(ctx, ctxi->irqs); |
1555 | if (unlikely(rc)) { |
1556 | dev_dbg(dev, "%s: Could not start context rc=%d\n" , |
1557 | __func__, rc); |
1558 | goto err1; |
1559 | } |
1560 | |
1561 | ctxid = cfg->ops->process_element(ctx); |
1562 | if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { |
1563 | dev_err(dev, "%s: ctxid=%d invalid\n" , __func__, ctxid); |
1564 | rc = -EPERM; |
1565 | goto err2; |
1566 | } |
1567 | |
1568 | file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd); |
1569 | if (unlikely(fd < 0)) { |
1570 | rc = -ENODEV; |
1571 | dev_err(dev, "%s: Could not get file descriptor\n" , __func__); |
1572 | goto err2; |
1573 | } |
1574 | |
1575 | /* Update with new MMIO area based on updated context id */ |
1576 | ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; |
1577 | |
1578 | rc = afu_attach(cfg, ctxi); |
1579 | if (rc) { |
1580 | dev_err(dev, "%s: Could not attach AFU rc %d\n" , __func__, rc); |
1581 | goto err3; |
1582 | } |
1583 | |
1584 | /* |
1585 | * No error paths after this point. Once the fd is installed it's |
1586 | * visible to user space and can't be undone safely on this thread. |
1587 | */ |
1588 | ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); |
1589 | ctxi->ctx = ctx; |
1590 | ctxi->file = file; |
1591 | |
1592 | /* |
1593 | * Put context back in table (note the reinit of the context list); |
1594 | * we must first drop the context's mutex and then acquire it in |
1595 | * order with the table/list mutex to avoid a deadlock - safe to do |
1596 | * here because no one can find us at this moment in time. |
1597 | */ |
1598 | mutex_unlock(lock: &ctxi->mutex); |
1599 | mutex_lock(&cfg->ctx_tbl_list_mutex); |
1600 | mutex_lock(&ctxi->mutex); |
1601 | list_del_init(entry: &ctxi->list); |
1602 | cfg->ctx_tbl[ctxid] = ctxi; |
1603 | mutex_unlock(lock: &cfg->ctx_tbl_list_mutex); |
1604 | fd_install(fd, file); |
1605 | *adap_fd = fd; |
1606 | out: |
1607 | dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n" , |
1608 | __func__, ctxid, fd, rc); |
1609 | return rc; |
1610 | |
1611 | err3: |
1612 | fput(file); |
1613 | put_unused_fd(fd); |
1614 | err2: |
1615 | cfg->ops->stop_context(ctx); |
1616 | err1: |
1617 | cfg->ops->release_context(ctx); |
1618 | goto out; |
1619 | } |
1620 | |
1621 | /** |
1622 | * cxlflash_afu_recover() - initiates AFU recovery |
1623 | * @sdev: SCSI device associated with LUN. |
1624 | * @recover: Recover ioctl data structure. |
1625 | * |
1626 | * Only a single recovery is allowed at a time to avoid exhausting CXL |
1627 | * resources (leading to recovery failure) in the event that we're up |
1628 | * against the maximum number of contexts limit. For similar reasons, |
1629 | * a context recovery is retried if there are multiple recoveries taking |
1630 | * place at the same time and the failure was due to CXL services being |
1631 | * unable to keep up. |
1632 | * |
1633 | * As this routine is called on ioctl context, it holds the ioctl r/w |
1634 | * semaphore that is used to drain ioctls in recovery scenarios. The |
1635 | * implementation to achieve the pacing described above (a local mutex) |
1636 | * requires that the ioctl r/w semaphore be dropped and reacquired to |
1637 | * avoid a 3-way deadlock when multiple process recoveries operate in |
1638 | * parallel. |
1639 | * |
1640 | * Because a user can detect an error condition before the kernel, it is |
1641 | * quite possible for this routine to act as the kernel's EEH detection |
1642 | * source (MMIO read of mbox_r). Because of this, there is a window of |
1643 | * time where an EEH might have been detected but not yet 'serviced' |
1644 | * (callback invoked, causing the device to enter reset state). To avoid |
1645 | * looping in this routine during that window, a 1 second sleep is in place |
1646 | * between the time the MMIO failure is detected and the time a wait on the |
1647 | * reset wait queue is attempted via check_state(). |
1648 | * |
1649 | * Return: 0 on success, -errno on failure |
1650 | */ |
1651 | static int cxlflash_afu_recover(struct scsi_device *sdev, |
1652 | struct dk_cxlflash_recover_afu *recover) |
1653 | { |
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1655 | struct device *dev = &cfg->dev->dev; |
1656 | struct llun_info *lli = sdev->hostdata; |
1657 | struct afu *afu = cfg->afu; |
1658 | struct ctx_info *ctxi = NULL; |
1659 | struct mutex *mutex = &cfg->ctx_recovery_mutex; |
1660 | struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); |
1661 | u64 flags; |
1662 | u64 ctxid = DECODE_CTXID(recover->context_id), |
1663 | rctxid = recover->context_id; |
1664 | long reg; |
1665 | bool locked = true; |
1666 | int lretry = 20; /* up to 2 seconds */ |
1667 | int new_adap_fd = -1; |
1668 | int rc = 0; |
1669 | |
atomic_inc(&cfg->recovery_threads);
up_read(&cfg->ioctl_rwsem);
rc = mutex_lock_interruptible(mutex);
down_read(&cfg->ioctl_rwsem);
1674 | if (rc) { |
1675 | locked = false; |
1676 | goto out; |
1677 | } |
1678 | |
1679 | rc = check_state(cfg); |
1680 | if (rc) { |
1681 | dev_err(dev, "%s: Failed state rc=%d\n" , __func__, rc); |
1682 | rc = -ENODEV; |
1683 | goto out; |
1684 | } |
1685 | |
1686 | dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n" , |
1687 | __func__, recover->reason, rctxid); |
1688 | |
1689 | retry: |
1690 | /* Ensure that this process is attached to the context */ |
ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
if (unlikely(!ctxi)) {
dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1694 | rc = -EINVAL; |
1695 | goto out; |
1696 | } |
1697 | |
1698 | if (ctxi->err_recovery_active) { |
1699 | retry_recover: |
rc = recover_context(cfg, ctxi, &new_adap_fd);
if (unlikely(rc)) {
dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
__func__, ctxid, rc);
if ((rc == -ENODEV) &&
((atomic_read(&cfg->recovery_threads) > 1) ||
(lretry--))) {
dev_dbg(dev, "%s: Going to try again\n",
__func__);
mutex_unlock(mutex);
msleep(100);
rc = mutex_lock_interruptible(mutex);
1712 | if (rc) { |
1713 | locked = false; |
1714 | goto out; |
1715 | } |
1716 | goto retry_recover; |
1717 | } |
1718 | |
1719 | goto out; |
1720 | } |
1721 | |
1722 | ctxi->err_recovery_active = false; |
1723 | |
1724 | flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD | |
1725 | DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET; |
1726 | if (afu_is_sq_cmd_mode(afu)) |
1727 | flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE; |
1728 | |
1729 | recover->hdr.return_flags = flags; |
1730 | recover->context_id = ctxi->ctxid; |
1731 | recover->adap_fd = new_adap_fd; |
1732 | recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea); |
1733 | goto out; |
1734 | } |
1735 | |
1736 | /* Test if in error state */ |
1737 | reg = readq_be(&hwq->ctrl_map->mbox_r); |
1738 | if (reg == -1) { |
1739 | dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n" , __func__); |
1740 | |
1741 | /* |
1742 | * Before checking the state, put back the context obtained with |
1743 | * get_context() as it is no longer needed and sleep for a short |
1744 | * period of time (see prolog notes). |
1745 | */ |
1746 | put_context(ctxi); |
1747 | ctxi = NULL; |
ssleep(1);
1749 | rc = check_state(cfg); |
1750 | if (unlikely(rc)) |
1751 | goto out; |
1752 | goto retry; |
1753 | } |
1754 | |
1755 | dev_dbg(dev, "%s: MMIO working, no recovery required\n" , __func__); |
1756 | out: |
1757 | if (likely(ctxi)) |
1758 | put_context(ctxi); |
1759 | if (locked) |
mutex_unlock(mutex);
atomic_dec_if_positive(&cfg->recovery_threads);
1762 | return rc; |
1763 | } |
1764 | |
1765 | /** |
1766 | * process_sense() - evaluates and processes sense data |
1767 | * @sdev: SCSI device associated with LUN. |
1768 | * @verify: Verify ioctl data structure. |
1769 | * |
1770 | * Return: 0 on success, -errno on failure |
1771 | */ |
1772 | static int process_sense(struct scsi_device *sdev, |
1773 | struct dk_cxlflash_verify *verify) |
1774 | { |
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1776 | struct device *dev = &cfg->dev->dev; |
1777 | struct llun_info *lli = sdev->hostdata; |
1778 | struct glun_info *gli = lli->parent; |
1779 | u64 prev_lba = gli->max_lba; |
1780 | struct scsi_sense_hdr sshdr = { 0 }; |
1781 | int rc = 0; |
1782 | |
/* scsi_normalize_sense() returns true only for valid sense data */
if (!scsi_normalize_sense((const u8 *)&verify->sense_data,
DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr)) {
dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
rc = -EINVAL;
goto out;
}
1790 | |
1791 | switch (sshdr.sense_key) { |
1792 | case NO_SENSE: |
1793 | case RECOVERED_ERROR: |
1794 | case NOT_READY: |
1795 | break; |
1796 | case UNIT_ATTENTION: |
1797 | switch (sshdr.asc) { |
1798 | case 0x29: /* Power on Reset or Device Reset */ |
1799 | fallthrough; |
1800 | case 0x2A: /* Device settings/capacity changed */ |
1801 | rc = read_cap16(sdev, lli); |
1802 | if (rc) { |
1803 | rc = -ENODEV; |
1804 | break; |
1805 | } |
1806 | if (prev_lba != gli->max_lba) |
1807 | dev_dbg(dev, "%s: Capacity changed old=%lld " |
1808 | "new=%lld\n" , __func__, prev_lba, |
1809 | gli->max_lba); |
1810 | break; |
1811 | case 0x3F: /* Report LUNs changed, Rescan. */ |
1812 | scsi_scan_host(cfg->host); |
1813 | break; |
1814 | default: |
1815 | rc = -EIO; |
1816 | break; |
1817 | } |
1818 | break; |
1819 | default: |
1820 | rc = -EIO; |
1821 | break; |
1822 | } |
1823 | out: |
1824 | dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n" , __func__, |
1825 | sshdr.sense_key, sshdr.asc, sshdr.ascq, rc); |
1826 | return rc; |
1827 | } |
1828 | |
1829 | /** |
1830 | * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes |
1831 | * @sdev: SCSI device associated with LUN. |
1832 | * @verify: Verify ioctl data structure. |
1833 | * |
1834 | * Return: 0 on success, -errno on failure |
1835 | */ |
1836 | static int cxlflash_disk_verify(struct scsi_device *sdev, |
1837 | struct dk_cxlflash_verify *verify) |
1838 | { |
1839 | int rc = 0; |
1840 | struct ctx_info *ctxi = NULL; |
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1842 | struct device *dev = &cfg->dev->dev; |
1843 | struct llun_info *lli = sdev->hostdata; |
1844 | struct glun_info *gli = lli->parent; |
1845 | struct sisl_rht_entry *rhte = NULL; |
1846 | res_hndl_t rhndl = verify->rsrc_handle; |
1847 | u64 ctxid = DECODE_CTXID(verify->context_id), |
1848 | rctxid = verify->context_id; |
1849 | u64 last_lba = 0; |
1850 | |
1851 | dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, " |
1852 | "flags=%016llx\n" , __func__, ctxid, verify->rsrc_handle, |
1853 | verify->hint, verify->hdr.flags); |
1854 | |
ctxi = get_context(cfg, rctxid, lli, 0);
if (unlikely(!ctxi)) {
dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1858 | rc = -EINVAL; |
1859 | goto out; |
1860 | } |
1861 | |
1862 | rhte = get_rhte(ctxi, rhndl, lli); |
1863 | if (unlikely(!rhte)) { |
1864 | dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n" , |
1865 | __func__, rhndl); |
1866 | rc = -EINVAL; |
1867 | goto out; |
1868 | } |
1869 | |
1870 | /* |
1871 | * Look at the hint/sense to see if it requires us to redrive |
1872 | * inquiry (i.e. the Unit attention is due to the WWN changing). |
1873 | */ |
1874 | if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) { |
1875 | /* Can't hold mutex across process_sense/read_cap16, |
1876 | * since we could have an intervening EEH event. |
1877 | */ |
1878 | ctxi->unavail = true; |
1879 | mutex_unlock(lock: &ctxi->mutex); |
1880 | rc = process_sense(sdev, verify); |
1881 | if (unlikely(rc)) { |
1882 | dev_err(dev, "%s: Failed to validate sense data (%d)\n" , |
1883 | __func__, rc); |
1884 | mutex_lock(&ctxi->mutex); |
1885 | ctxi->unavail = false; |
1886 | goto out; |
1887 | } |
1888 | mutex_lock(&ctxi->mutex); |
1889 | ctxi->unavail = false; |
1890 | } |
1891 | |
1892 | switch (gli->mode) { |
1893 | case MODE_PHYSICAL: |
1894 | last_lba = gli->max_lba; |
1895 | break; |
1896 | case MODE_VIRTUAL: |
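/*
* The last addressable LBA of the virtual LUN, in export-sized
* (CXLFLASH_BLOCK_SIZE) blocks:
*
*	last_lba = (lxt_cnt * MC_CHUNK_SIZE * blk_len)
*			/ CXLFLASH_BLOCK_SIZE - 1
*/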
1897 | /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */ |
1898 | last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len); |
1899 | last_lba /= CXLFLASH_BLOCK_SIZE; |
1900 | last_lba--; |
1901 | break; |
1902 | default: |
WARN(1, "Unsupported LUN mode!");
1904 | } |
1905 | |
1906 | verify->last_lba = last_lba; |
1907 | |
1908 | out: |
1909 | if (likely(ctxi)) |
1910 | put_context(ctxi); |
1911 | dev_dbg(dev, "%s: returning rc=%d llba=%llx\n" , |
1912 | __func__, rc, verify->last_lba); |
1913 | return rc; |
1914 | } |
1915 | |
1916 | /** |
1917 | * decode_ioctl() - translates an encoded ioctl to an easily identifiable string |
1918 | * @cmd: The ioctl command to decode. |
1919 | * |
1920 | * Return: A string identifying the decoded ioctl. |
1921 | */ |
1922 | static char *decode_ioctl(unsigned int cmd) |
1923 | { |
1924 | switch (cmd) { |
1925 | case DK_CXLFLASH_ATTACH: |
1926 | return __stringify_1(DK_CXLFLASH_ATTACH); |
1927 | case DK_CXLFLASH_USER_DIRECT: |
1928 | return __stringify_1(DK_CXLFLASH_USER_DIRECT); |
1929 | case DK_CXLFLASH_USER_VIRTUAL: |
1930 | return __stringify_1(DK_CXLFLASH_USER_VIRTUAL); |
1931 | case DK_CXLFLASH_VLUN_RESIZE: |
1932 | return __stringify_1(DK_CXLFLASH_VLUN_RESIZE); |
1933 | case DK_CXLFLASH_RELEASE: |
1934 | return __stringify_1(DK_CXLFLASH_RELEASE); |
1935 | case DK_CXLFLASH_DETACH: |
1936 | return __stringify_1(DK_CXLFLASH_DETACH); |
1937 | case DK_CXLFLASH_VERIFY: |
1938 | return __stringify_1(DK_CXLFLASH_VERIFY); |
1939 | case DK_CXLFLASH_VLUN_CLONE: |
1940 | return __stringify_1(DK_CXLFLASH_VLUN_CLONE); |
1941 | case DK_CXLFLASH_RECOVER_AFU: |
1942 | return __stringify_1(DK_CXLFLASH_RECOVER_AFU); |
1943 | case DK_CXLFLASH_MANAGE_LUN: |
1944 | return __stringify_1(DK_CXLFLASH_MANAGE_LUN); |
1945 | } |
1946 | |
1947 | return "UNKNOWN" ; |
1948 | } |
1949 | |
1950 | /** |
1951 | * cxlflash_disk_direct_open() - opens a direct (physical) disk |
1952 | * @sdev: SCSI device associated with LUN. |
1953 | * @arg: UDirect ioctl data structure. |
1954 | * |
1955 | * On successful return, the user is informed of the resource handle |
1956 | * to be used to identify the direct lun and the size (in blocks) of |
1957 | * the direct lun in last LBA format. |
1958 | * |
1959 | * Return: 0 on success, -errno on failure |
1960 | */ |
1961 | static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) |
1962 | { |
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1964 | struct device *dev = &cfg->dev->dev; |
1965 | struct afu *afu = cfg->afu; |
1966 | struct llun_info *lli = sdev->hostdata; |
1967 | struct glun_info *gli = lli->parent; |
1968 | struct dk_cxlflash_release rel = { { 0 }, 0 }; |
1969 | |
1970 | struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg; |
1971 | |
1972 | u64 ctxid = DECODE_CTXID(pphys->context_id), |
1973 | rctxid = pphys->context_id; |
1974 | u64 lun_size = 0; |
1975 | u64 last_lba = 0; |
1976 | u64 rsrc_handle = -1; |
1977 | u32 port = CHAN2PORTMASK(sdev->channel); |
1978 | |
1979 | int rc = 0; |
1980 | |
1981 | struct ctx_info *ctxi = NULL; |
1982 | struct sisl_rht_entry *rhte = NULL; |
1983 | |
1984 | dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n" , __func__, ctxid, lun_size); |
1985 | |
rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
1989 | goto out; |
1990 | } |
1991 | |
ctxi = get_context(cfg, rctxid, lli, 0);
if (unlikely(!ctxi)) {
dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1995 | rc = -EINVAL; |
1996 | goto err1; |
1997 | } |
1998 | |
1999 | rhte = rhte_checkout(ctxi, lli); |
2000 | if (unlikely(!rhte)) { |
2001 | dev_dbg(dev, "%s: Too many opens ctxid=%lld\n" , |
2002 | __func__, ctxid); |
2003 | rc = -EMFILE; /* too many opens */ |
2004 | goto err1; |
2005 | } |
2006 | |
2007 | rsrc_handle = (rhte - ctxi->rht_start); |
2008 | |
rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
2010 | |
2011 | last_lba = gli->max_lba; |
2012 | pphys->hdr.return_flags = 0; |
2013 | pphys->last_lba = last_lba; |
2014 | pphys->rsrc_handle = rsrc_handle; |
2015 | |
rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
if (unlikely(rc)) {
dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
2019 | goto err2; |
2020 | } |
2021 | |
2022 | out: |
2023 | if (likely(ctxi)) |
2024 | put_context(ctxi); |
2025 | dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n" , |
2026 | __func__, rsrc_handle, rc, last_lba); |
2027 | return rc; |
2028 | |
2029 | err2: |
marshal_udir_to_rele(pphys, &rel);
_cxlflash_disk_release(sdev, ctxi, &rel);
2032 | goto out; |
2033 | err1: |
2034 | cxlflash_lun_detach(gli); |
2035 | goto out; |
2036 | } |
2037 | |
2038 | /** |
2039 | * ioctl_common() - common IOCTL handler for driver |
2040 | * @sdev: SCSI device associated with LUN. |
2041 | * @cmd: IOCTL command. |
2042 | * |
2043 | * Handles common fencing operations that are valid for multiple ioctls. Always |
2044 | * allow through ioctls that are cleanup oriented in nature, even when operating |
2045 | * in a failed/terminating state. |
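* For this driver those are DK_CXLFLASH_VLUN_RESIZE, DK_CXLFLASH_RELEASE
* and DK_CXLFLASH_DETACH, which are let through when the device is in
* the STATE_FAILTERM state.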
2046 | * |
2047 | * Return: 0 on success, -errno on failure |
2048 | */ |
2049 | static int ioctl_common(struct scsi_device *sdev, unsigned int cmd) |
2050 | { |
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
2052 | struct device *dev = &cfg->dev->dev; |
2053 | struct llun_info *lli = sdev->hostdata; |
2054 | int rc = 0; |
2055 | |
2056 | if (unlikely(!lli)) { |
2057 | dev_dbg(dev, "%s: Unknown LUN\n" , __func__); |
2058 | rc = -EINVAL; |
2059 | goto out; |
2060 | } |
2061 | |
2062 | rc = check_state(cfg); |
2063 | if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) { |
2064 | switch (cmd) { |
2065 | case DK_CXLFLASH_VLUN_RESIZE: |
2066 | case DK_CXLFLASH_RELEASE: |
2067 | case DK_CXLFLASH_DETACH: |
2068 | dev_dbg(dev, "%s: Command override rc=%d\n" , |
2069 | __func__, rc); |
2070 | rc = 0; |
2071 | break; |
2072 | } |
2073 | } |
2074 | out: |
2075 | return rc; |
2076 | } |
2077 | |
2078 | /** |
2079 | * cxlflash_ioctl() - IOCTL handler for driver |
2080 | * @sdev: SCSI device associated with LUN. |
2081 | * @cmd: IOCTL command. |
2082 | * @arg: Userspace ioctl data structure. |
2083 | * |
2084 | * A read/write semaphore is used to implement a 'drain' of currently |
2085 | * running ioctls. The read semaphore is taken at the beginning of each |
2086 | * ioctl thread and released upon concluding execution. Additionally the |
2087 | * semaphore should be released and then reacquired in any ioctl execution |
2088 | * path which will wait for an event to occur that is outside the scope of |
2089 | * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, |
2090 | * a thread simply needs to acquire the write semaphore. |
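*
* As a minimal sketch, a drain is simply (this is the pattern used by
* drain_ioctls() in the core driver):
*
*	down_write(&cfg->ioctl_rwsem);
*	up_write(&cfg->ioctl_rwsem);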
2091 | * |
2092 | * Return: 0 on success, -errno on failure |
2093 | */ |
2094 | int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) |
2095 | { |
2096 | typedef int (*sioctl) (struct scsi_device *, void *); |
2097 | |
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
2099 | struct device *dev = &cfg->dev->dev; |
2100 | struct afu *afu = cfg->afu; |
2101 | struct dk_cxlflash_hdr *hdr; |
2102 | char buf[sizeof(union cxlflash_ioctls)]; |
2103 | size_t size = 0; |
2104 | bool known_ioctl = false; |
2105 | int idx; |
2106 | int rc = 0; |
2107 | struct Scsi_Host *shost = sdev->host; |
2108 | sioctl do_ioctl = NULL; |
2109 | |
2110 | static const struct { |
2111 | size_t size; |
2112 | sioctl ioctl; |
2113 | } ioctl_tbl[] = { /* NOTE: order matters here */ |
2114 | {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach}, |
2115 | {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open}, |
2116 | {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release}, |
2117 | {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach}, |
2118 | {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify}, |
2119 | {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover}, |
2120 | {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun}, |
2121 | {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open}, |
2122 | {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize}, |
2123 | {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone}, |
2124 | }; |
2125 | |
2126 | /* Hold read semaphore so we can drain if needed */ |
down_read(&cfg->ioctl_rwsem);
2128 | |
2129 | /* Restrict command set to physical support only for internal LUN */ |
2130 | if (afu->internal_lun) |
2131 | switch (cmd) { |
2132 | case DK_CXLFLASH_RELEASE: |
2133 | case DK_CXLFLASH_USER_VIRTUAL: |
2134 | case DK_CXLFLASH_VLUN_RESIZE: |
2135 | case DK_CXLFLASH_VLUN_CLONE: |
2136 | dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n" , |
2137 | __func__, decode_ioctl(cmd), afu->internal_lun); |
2138 | rc = -EINVAL; |
2139 | goto cxlflash_ioctl_exit; |
2140 | } |
2141 | |
2142 | switch (cmd) { |
2143 | case DK_CXLFLASH_ATTACH: |
2144 | case DK_CXLFLASH_USER_DIRECT: |
2145 | case DK_CXLFLASH_RELEASE: |
2146 | case DK_CXLFLASH_DETACH: |
2147 | case DK_CXLFLASH_VERIFY: |
2148 | case DK_CXLFLASH_RECOVER_AFU: |
2149 | case DK_CXLFLASH_USER_VIRTUAL: |
2150 | case DK_CXLFLASH_VLUN_RESIZE: |
2151 | case DK_CXLFLASH_VLUN_CLONE: |
2152 | dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n" , |
2153 | __func__, decode_ioctl(cmd), cmd, shost->host_no, |
2154 | sdev->channel, sdev->id, sdev->lun); |
2155 | rc = ioctl_common(sdev, cmd); |
2156 | if (unlikely(rc)) |
2157 | goto cxlflash_ioctl_exit; |
2158 | |
2159 | fallthrough; |
2160 | |
2161 | case DK_CXLFLASH_MANAGE_LUN: |
2162 | known_ioctl = true; |
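/*
* The DK_CXLFLASH_* ioctl numbers are sequential and mirror the
* layout of ioctl_tbl[] (hence the 'order matters' note above), so
* the table index falls out of the command's _IOC_NR offset.
*/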
2163 | idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH); |
2164 | size = ioctl_tbl[idx].size; |
2165 | do_ioctl = ioctl_tbl[idx].ioctl; |
2166 | |
2167 | if (likely(do_ioctl)) |
2168 | break; |
2169 | |
2170 | fallthrough; |
2171 | default: |
2172 | rc = -EINVAL; |
2173 | goto cxlflash_ioctl_exit; |
2174 | } |
2175 | |
2176 | if (unlikely(copy_from_user(&buf, arg, size))) { |
2177 | dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n" , |
2178 | __func__, size, cmd, decode_ioctl(cmd), arg); |
2179 | rc = -EFAULT; |
2180 | goto cxlflash_ioctl_exit; |
2181 | } |
2182 | |
2183 | hdr = (struct dk_cxlflash_hdr *)&buf; |
2184 | if (hdr->version != DK_CXLFLASH_VERSION_0) { |
2185 | dev_dbg(dev, "%s: Version %u not supported for %s\n" , |
2186 | __func__, hdr->version, decode_ioctl(cmd)); |
2187 | rc = -EINVAL; |
2188 | goto cxlflash_ioctl_exit; |
2189 | } |
2190 | |
2191 | if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) { |
2192 | dev_dbg(dev, "%s: Reserved/rflags populated\n" , __func__); |
2193 | rc = -EINVAL; |
2194 | goto cxlflash_ioctl_exit; |
2195 | } |
2196 | |
2197 | rc = do_ioctl(sdev, (void *)&buf); |
2198 | if (likely(!rc)) |
2199 | if (unlikely(copy_to_user(arg, &buf, size))) { |
2200 | dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n" , |
2201 | __func__, size, cmd, decode_ioctl(cmd), arg); |
2202 | rc = -EFAULT; |
2203 | } |
2204 | |
2205 | /* fall through to exit */ |
2206 | |
2207 | cxlflash_ioctl_exit: |
up_read(&cfg->ioctl_rwsem);
if (unlikely(rc && known_ioctl))
dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
"returned rc %d\n", __func__,
decode_ioctl(cmd), cmd, shost->host_no,
sdev->channel, sdev->id, sdev->lun, rc);
else
dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
"returned rc %d\n", __func__, decode_ioctl(cmd),
cmd, shost->host_no, sdev->channel, sdev->id,
sdev->lun, rc);
2219 | return rc; |
2220 | } |
2221 | |