// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 * SCSI queueing library.
 * Initial versions: Eric Youngdale (eric@andante.org).
 * Based upon conversations with large numbers
 * of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

/*
 * Size of integrity metadata is usually small, 1 inline sg should
 * cover normal cases.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  SCSI_INLINE_PROT_SG_CNT  0
#define  SCSI_INLINE_SG_CNT  0
#else
#define  SCSI_INLINE_PROT_SG_CNT  1
#define  SCSI_INLINE_SG_CNT  2
#endif

static struct kmem_cache *scsi_sense_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	int ret = 0;

	mutex_lock(&scsi_sense_cache_mutex);
	if (!scsi_sense_cache) {
		scsi_sense_cache =
			kmem_cache_create_usercopy("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
				0, SCSI_SENSE_BUFFERSIZE, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken. The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	if (rq->rq_flags & RQF_DONTPREP) {
		rq->rq_flags &= ~RQF_DONTPREP;
		scsi_mq_uninit_cmd(cmd);
	} else {
		WARN_ON_ONCE(true);
	}

	blk_mq_requeue_request(rq, false);
	if (!scsi_host_in_recovery(cmd->device->host))
		blk_mq_delay_kick_requeue_list(rq->q, msecs);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion. The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion. This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device, cmd);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_mq_destroy_queue() finishes.
	 */
	cmd->result = 0;

	blk_mq_requeue_request(scsi_cmd_to_rq(cmd),
			       !scsi_host_in_recovery(cmd->device->host));
}

/**
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd:    command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * We do this for one of two cases. Either the host is busy and it cannot accept
 * any more commands for the time being, or the device returned QUEUE_FULL and
 * can accept no more commands.
 *
 * Context: This could be called either from an interrupt context or a normal
 * process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, true);
}

/**
 * scsi_execute_cmd - insert request and wait for the result
 * @sdev: scsi_device
 * @cmd: scsi command
 * @opf: block layer request cmd_flags
 * @buffer: data buffer
 * @bufflen: length of @buffer in bytes
 * @timeout: request timeout in jiffies
 * @retries: number of times to retry request
 * @args: Optional args. See struct definition for field descriptions
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
		     blk_opf_t opf, void *buffer, unsigned int bufflen,
		     int timeout, int retries,
		     const struct scsi_exec_args *args)
{
	static const struct scsi_exec_args default_args;
	struct request *req;
	struct scsi_cmnd *scmd;
	int ret;

	if (!args)
		args = &default_args;
	else if (WARN_ON_ONCE(args->sense &&
			      args->sense_len != SCSI_SENSE_BUFFERSIZE))
		return -EINVAL;

	req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (bufflen) {
		ret = blk_rq_map_kern(sdev->request_queue, req,
				      buffer, bufflen, GFP_NOIO);
		if (ret)
			goto out;
	}
	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
	scmd->allowed = retries;
	scmd->flags |= args->scmd_flags;
	req->timeout = timeout;
	req->rq_flags |= RQF_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req, true);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
		memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);

	if (args->resid)
		*args->resid = scmd->resid_len;
	if (args->sense)
		memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
	if (args->sshdr)
		scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
				     args->sshdr);

	ret = scmd->result;
 out:
	blk_mq_free_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute_cmd);
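
/*
 * Usage sketch for scsi_execute_cmd() (illustrative only, not part of this
 * file): issue TEST UNIT READY and capture decoded sense data. The helper
 * name and the 10 second timeout are assumptions made for the example.
 *
 *	static int example_test_unit_ready(struct scsi_device *sdev)
 *	{
 *		static const unsigned char cmd[6] = { TEST_UNIT_READY };
 *		struct scsi_sense_hdr sshdr;
 *		const struct scsi_exec_args args = { .sshdr = &sshdr };
 *
 *		// No data transfer: REQ_OP_DRV_IN with a NULL buffer.
 *		return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
 *					10 * HZ, 3, &args);
 *	}
 */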

/*
 * Wake up the error handler if necessary. Avoid the case in which the error
 * handler is not woken up while the number of host in-flight requests equals
 * shost->host_failed: call_rcu() is used in scsi_eh_scmd_add() in combination
 * with an RCU read lock in this function to ensure that this function in
 * its entirety either finishes before scsi_eh_scmd_add() increases the
 * host_failed counter or that it notices the shost state change made by
 * scsi_eh_scmd_add().
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	rcu_read_lock();
	__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
	if (unlikely(scsi_host_in_recovery(shost))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->host_failed || shost->host_eh_scheduled)
			scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	rcu_read_unlock();
}

void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);

	scsi_dec_host_busy(shost, cmd);

	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	sbitmap_put(&sdev->budget_map, cmd->budget_token);
	cmd->budget_token = -1;
}

/*
 * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with
 * interrupts disabled.
 */
static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data)
{
	struct scsi_device *current_sdev = data;

	if (sdev != current_sdev)
		blk_mq_run_hw_queues(sdev->request_queue, true);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and run the queues of all the scsi_devices on the target - including
 * current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Run the queues of all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_mq_run_hw_queues(current_sdev->request_queue,
			     shost->queuecommand_may_block);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!starget->starget_sdev_user)
		__starget_for_each_device(starget, current_sdev,
					  scsi_kick_sdev_queue);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (scsi_device_busy(sdev) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, run the hardware queues. The queueing
		 * code drops the host_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * Anyone checking or modifying starved_list or
		 * starved_entry must hold the host_lock.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_mq_destroy_queue() before the queue is run from this
		 * function then running the queue will return immediately
		 * since blk_mq_destroy_queue() marks the queue with
		 * QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_mq_run_hw_queues(slq, false);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q:  last request's queue
 *
 * The previous command was completely finished; start a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	/* Note: blk_mq_kick_requeue_list() runs the queue asynchronously. */
	blk_mq_kick_requeue_list(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table,
				SCSI_INLINE_SG_CNT);
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table,
				SCSI_INLINE_PROT_SG_CNT);
}
EXPORT_SYMBOL_GPL(scsi_free_sgtables);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	scsi_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);
}

static void scsi_run_queue_async(struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(sdev->host))
		return;

	if (scsi_target(sdev)->single_lun ||
	    !list_empty(&sdev->host->starved_list)) {
		kblockd_schedule_work(&sdev->requeue_work);
	} else {
		/*
		 * The smp_mb() in sbitmap_queue_clear(), or the one implied
		 * by .end_io, orders the write of .device_busy in
		 * scsi_device_unbusy() against the read of sdev->restarts.
		 */
		int old = atomic_read(&sdev->restarts);

		/*
		 * ->restarts has to be kept as non-zero if new budget
		 *  contention occurs.
		 *
		 * No need to run queue when either another re-run
		 * queue wins in updating ->restarts or a new budget
		 * contention occurs.
		 */
		if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
			blk_mq_run_hw_queues(sdev->request_queue, true);
	}
}

/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
		unsigned int bytes)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	// XXX:
	if (blk_queue_add_random(q))
		add_disk_randomness(req->q->disk);

	if (!blk_rq_is_passthrough(req)) {
		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
		cmd->flags &= ~SCMD_INITIALIZED;
	}

	/*
	 * Calling rcu_barrier() is not necessary here because the
	 * SCSI error handler guarantees that the function called by
	 * call_rcu() has been called before scsi_end_request() is
	 * called.
	 */
	destroy_rcu_head(&cmd->rcu);

	/*
	 * In the MQ case the command gets freed by __blk_mq_end_request,
	 * so we have to do all cleanup that depends on it earlier.
	 *
	 * We also can't kick the queues from irq context, so we
	 * will have to defer it to a workqueue.
	 */
	scsi_mq_uninit_cmd(cmd);

	/*
	 * queue is still alive, so grab the ref for preventing it
	 * from being cleaned up during running queue.
	 */
	percpu_ref_get(&q->q_usage_counter);

	__blk_mq_end_request(req, error);

	scsi_run_queue_async(sdev);

	percpu_ref_put(&q->q_usage_counter);
	return false;
}

/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @result: scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value.
 */
static blk_status_t scsi_result_to_blk_status(int result)
{
	/*
	 * Check the scsi-ml byte first in case we converted a host or status
	 * byte.
	 */
	switch (scsi_ml_byte(result)) {
	case SCSIML_STAT_OK:
		break;
	case SCSIML_STAT_RESV_CONFLICT:
		return BLK_STS_RESV_CONFLICT;
	case SCSIML_STAT_NOSPC:
		return BLK_STS_NOSPC;
	case SCSIML_STAT_MED_ERROR:
		return BLK_STS_MEDIUM;
	case SCSIML_STAT_TGT_FAILURE:
		return BLK_STS_TARGET;
	case SCSIML_STAT_DL_TIMEOUT:
		return BLK_STS_DURATION_LIMIT;
	}

	switch (host_byte(result)) {
	case DID_OK:
		if (scsi_status_is_good(result))
			return BLK_STS_OK;
		return BLK_STS_IOERR;
	case DID_TRANSPORT_FAILFAST:
	case DID_TRANSPORT_MARGINAL:
		return BLK_STS_TRANSPORT;
	default:
		return BLK_STS_IOERR;
	}
}

/**
 * scsi_rq_err_bytes - determine the number of bytes until the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of I/Os which require different failure
 *     handling. This function determines the number of bytes which can be
 *     failed from the beginning of the request without crossing into an
 *     area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
static unsigned int scsi_rq_err_bytes(const struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types. We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
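
/*
 * Worked example (illustrative): consider an RQF_MIXED_MERGE request built
 * from three 4 KiB bios where only the first two carry every failfast bit of
 * the first bio. scsi_rq_err_bytes() then returns 8192: that many bytes may
 * be failed right away, while the remaining 4 KiB must go down the retry
 * path instead.
 */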

static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
	struct request *req = scsi_cmd_to_rq(cmd);
	unsigned long wait_for;

	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
		return false;

	wait_for = (cmd->allowed + 1) * req->timeout;
	if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
			    wait_for/HZ);
		return true;
	}
	return false;
}

/*
 * When ALUA transition state is returned, reprep the cmd to
 * use the ALUA handler's transition timeout. Delay the reprep
 * 1 sec to avoid aggressive retries of the target in that
 * state.
 */
#define ALUA_TRANSITION_REPREP_DELAY	1000

/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
	struct request *req = scsi_cmd_to_rq(cmd);
	int level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
	      ACTION_RETRY, ACTION_DELAYED_RETRY} action;
	struct scsi_sense_hdr sshdr;
	bool sense_valid;
	bool sense_current = true;      /* false implies "deferred sense" */
	blk_status_t blk_stat;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	blk_stat = scsi_result_to_blk_status(result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && sense_current) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_PROTECTION;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_TARGET;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				blk_stat = BLK_STS_PROTECTION;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x11: /* notify (enable spinup) required */
				case 0x14: /* space allocation in progress */
				case 0x1a: /* start stop unit in progress */
				case 0x1b: /* sanitize in progress */
				case 0x1d: /* configuration in progress */
				case 0x24: /* depopulation in progress */
				case 0x25: /* depopulation restore in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				case 0x0a: /* ALUA state transition */
					action = ACTION_DELAYED_REPREP;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		case DATA_PROTECT:
			action = ACTION_FAIL;
			if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
			    (sshdr.asc == 0x55 &&
			     (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
				/* Insufficient zone resources */
				blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
			}
			break;
		case COMPLETED:
			fallthrough;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level =
				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						    SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (sense_valid)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
			return;
		fallthrough;
	case ACTION_REPREP:
		scsi_mq_requeue_cmd(cmd, 0);
		break;
	case ACTION_DELAYED_REPREP:
		scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
		break;
	}
}

/*
 * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
 * new result that may suppress further error checking. Also modifies
 * *blk_statp in some cases.
 */
static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
					blk_status_t *blk_statp)
{
	bool sense_valid;
	bool sense_current = true;	/* false implies "deferred sense" */
	struct request *req = scsi_cmd_to_rq(cmd);
	struct scsi_sense_hdr sshdr;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	if (blk_rq_is_passthrough(req)) {
		if (sense_valid) {
			/*
			 * SG_IO wants current and deferred errors
			 */
			cmd->sense_len = min(8 + cmd->sense_buffer[7],
					     SCSI_SENSE_BUFFERSIZE);
		}
		if (sense_current)
			*blk_statp = scsi_result_to_blk_status(result);
	} else if (blk_rq_bytes(req) == 0 && sense_current) {
		/*
		 * Flush commands do not transfer any data, and thus cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets *blk_statp explicitly for the problem case.
		 */
		*blk_statp = scsi_result_to_blk_status(result);
	}
	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original result into the request,
	 * which is what gets returned to the user.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		bool do_print = true;
		/*
		 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
		 * skip print since caller wants ATA registers. Only occurs
		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			do_print = false;
		else if (req->rq_flags & RQF_QUIET)
			do_print = false;
		if (do_print)
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough, *blk_statp may be set */
		*blk_statp = BLK_STS_OK;
	}
	/*
	 * Another corner case: the SCSI status byte is non-zero but 'good'.
	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
	 * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
	 * intermediate statuses (both obsolete in SAM-4) as good.
	 */
	if ((result & 0xff) && scsi_status_is_good(result)) {
		result = 0;
		*blk_statp = BLK_STS_OK;
	}
	return result;
}

/**
 * scsi_io_completion - Completion processing for SCSI commands.
 * @cmd: command that is finished.
 * @good_bytes: number of processed bytes.
 *
 * We will finish off the specified number of sectors. If we are done, the
 * command block will be released and the queue function will be goosed. If we
 * are not done then we have to figure out what to do next:
 *
 *   a) We can call scsi_mq_requeue_cmd().  The request will be
 *	unprepared and put back on the queue.  Then a new command will
 *	be created for it.  This should be used if we made forward
 *	progress, or if we want to switch from READ(10) to READ(6) for
 *	example.
 *
 *   b) We can call scsi_io_completion_action().  The request will be
 *	put back on the queue and retried using the same command as
 *	before, possibly after a delay.
 *
 *   c) We can call scsi_end_request() with blk_stat other than
 *	BLK_STS_OK, to fail the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request *req = scsi_cmd_to_rq(cmd);
	blk_status_t blk_stat = BLK_STS_OK;

	if (unlikely(result))	/* a nz result may or may not be an error */
		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Failed, zero length commands always need to drop down
	 * to retry code. Fast path should return in this block.
	 */
	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
			return; /* no bytes remaining */
	}

	/* Kill remainder if no retries. */
	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
			WARN_ONCE(true,
			    "Bytes remaining after failed, no-retry command");
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request just queue the command up again.
	 */
	if (likely(result == 0))
		scsi_mq_requeue_cmd(cmd, 0);
	else
		scsi_io_completion_action(cmd, result);
}

static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
		struct request *rq)
{
	return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
	       !op_is_write(req_op(rq)) &&
	       sdev->host->hostt->dma_need_drain(rq);
}

/**
 * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
 * @cmd: SCSI command data structure to initialize.
 *
 * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
 * for @cmd.
 *
 * Returns:
 * * BLK_STS_OK       - on success
 * * BLK_STS_RESOURCE - if the failure is retryable
 * * BLK_STS_IOERR    - if the failure is fatal
 */
blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
	struct scatterlist *last_sg = NULL;
	blk_status_t ret;
	bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
	int count;

	if (WARN_ON_ONCE(!nr_segs))
		return BLK_STS_IOERR;

	/*
	 * Make sure there is space for the drain.  The driver must adjust
	 * max_hw_segments to be prepared for this.
	 */
	if (need_drain)
		nr_segs++;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
			cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
		return BLK_STS_RESOURCE;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);

	if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
		unsigned int pad_len =
			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		last_sg->length += pad_len;
		cmd->extra_len += pad_len;
	}

	if (need_drain) {
		sg_unmark_end(last_sg);
		last_sg = sg_next(last_sg);
		sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
		sg_mark_end(last_sg);

		cmd->extra_len += sdev->dma_drain_len;
		count++;
	}

	BUG_ON(count > cmd->sdb.table.nents);
	cmd->sdb.table.nents = count;
	cmd->sdb.length = blk_rq_payload_bytes(rq);

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs;

		if (WARN_ON_ONCE(!prot_sdb)) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			ret = BLK_STS_IOERR;
			goto out_free_sgtables;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl,
				SCSI_INLINE_PROT_SG_CNT)) {
			ret = BLK_STS_RESOURCE;
			goto out_free_sgtables;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(count > ivecs);
		BUG_ON(count > queue_max_integrity_segments(rq->q));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLK_STS_OK;
out_free_sgtables:
	scsi_free_sgtables(cmd);
	return ret;
}
EXPORT_SYMBOL(scsi_alloc_sgtables);
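
/*
 * Usage sketch for scsi_alloc_sgtables() (illustrative only): a ULD
 * ->init_command() implementation typically maps the request first and then
 * walks the resulting scatterlist. The function and variable names below are
 * assumptions made for the example, not part of this API.
 *
 *	static blk_status_t example_init_command(struct scsi_cmnd *cmd)
 *	{
 *		struct scatterlist *sg;
 *		blk_status_t ret;
 *		int i;
 *
 *		ret = scsi_alloc_sgtables(cmd);
 *		if (ret != BLK_STS_OK)
 *			return ret;
 *
 *		// Inspect the mapped data segments.
 *		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
 *			pr_debug("seg %d: %u bytes\n", i, sg->length);
 *		return BLK_STS_OK;
 *	}
 *
 * On any later failure the tables must be released again with
 * scsi_free_sgtables(), as the error path above does.
 */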

/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the SCSI command to be initialized.
 *
 * This function initializes the members of struct scsi_cmnd that must be
 * initialized before request processing starts and that won't be
 * reinitialized if a SCSI command is requeued.
 */
static void scsi_initialize_rq(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
	cmd->cmd_len = MAX_COMMAND_SIZE;
	cmd->sense_len = 0;
	init_rcu_head(&cmd->rcu);
	cmd->jiffies_at_alloc = jiffies;
	cmd->retries = 0;
}

struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
				   blk_mq_req_flags_t flags)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, opf, flags);
	if (!IS_ERR(rq))
		scsi_initialize_rq(rq);
	return rq;
}
EXPORT_SYMBOL_GPL(scsi_alloc_request);

/*
 * Only called when the request isn't completed by SCSI, and not freed by
 * SCSI
 */
static void scsi_cleanup_rq(struct request *rq)
{
	if (rq->rq_flags & RQF_DONTPREP) {
		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
		rq->rq_flags &= ~RQF_DONTPREP;
	}
}

/* Called before a request is prepared. See also scsi_prepare_cmd(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) {
		cmd->flags |= SCMD_INITIALIZED;
		scsi_initialize_rq(rq);
	}

	cmd->device = dev;
	INIT_LIST_HEAD(&cmd->eh_entry);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
}

static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them. Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		blk_status_t ret = scsi_alloc_sgtables(cmd);
		if (unlikely(ret != BLK_STS_OK))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->transfersize = blk_rq_bytes(req);
	return BLK_STS_OK;
}

static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
	switch (sdev->sdev_state) {
	case SDEV_CREATED:
		return BLK_STS_OK;
	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		/*
		 * If the device is offline we refuse to process any
		 * commands.  The device must be brought online
		 * before trying any recovery commands.
		 */
		if (!sdev->offline_already) {
			sdev->offline_already = true;
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
		}
		return BLK_STS_IOERR;
	case SDEV_DEL:
		/*
		 * If the device is fully deleted, we refuse to
		 * process any commands as well.
		 */
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to dead device\n");
		return BLK_STS_IOERR;
	case SDEV_BLOCK:
	case SDEV_CREATED_BLOCK:
		return BLK_STS_RESOURCE;
	case SDEV_QUIESCE:
		/*
		 * If the device is quiesced we only accept power management
		 * commands.
		 */
		if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
			return BLK_STS_RESOURCE;
		return BLK_STS_OK;
	default:
		/*
		 * For any other not fully online state we only allow
		 * power management commands.
		 */
		if (req && !(req->rq_flags & RQF_PM))
			return BLK_STS_OFFLINE;
		return BLK_STS_OK;
	}
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
 * and return the token; else return -1.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	int token;

	token = sbitmap_get(&sdev->budget_map);
	if (token < 0)
		return -1;

	if (!atomic_read(&sdev->device_blocked))
		return token;

	/*
	 * Only unblock if no other commands are pending and
	 * if device_blocked has decreased to zero
	 */
	if (scsi_device_busy(sdev) > 1 ||
	    atomic_dec_return(&sdev->device_blocked) > 0) {
		sbitmap_put(&sdev->budget_map, token);
		return -1;
	}

	SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
			 "unblocking device at zero depth\n"));

	return token;
}

/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev,
				   struct scsi_cmnd *cmd)
{
	if (atomic_read(&shost->host_blocked) > 0) {
		if (scsi_host_busy(shost) > 0)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	scsi_dec_host_busy(shost, cmd);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When SCSI can't dispatch I/Os anymore and needs to kill I/Os, it
 * must return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static bool scsi_mq_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return false;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since the block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in the SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return true;

	return false;
}

/*
 * Block layer request completion callback. May be called from interrupt
 * context.
 */
static void scsi_complete(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	enum scsi_disposition disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
		disposition = SUCCESS;

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		scsi_eh_scmd_add(cmd);
		break;
	}
}

/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
 */
1458 | static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) |
1459 | { |
1460 | struct Scsi_Host *host = cmd->device->host; |
1461 | int rtn = 0; |
1462 | |
1463 | atomic_inc(v: &cmd->device->iorequest_cnt); |
1464 | |
1465 | /* check if the device is still usable */ |
1466 | if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { |
1467 | /* in SDEV_DEL we error all commands. DID_NO_CONNECT |
1468 | * returns an immediate error upwards, and signals |
1469 | * that the device is no longer present */ |
1470 | cmd->result = DID_NO_CONNECT << 16; |
1471 | goto done; |
1472 | } |
1473 | |
1474 | /* Check to see if the scsi lld made this device blocked. */ |
1475 | if (unlikely(scsi_device_blocked(cmd->device))) { |
1476 | /* |
1477 | * in blocked state, the command is just put back on |
1478 | * the device queue. The suspend state has already |
1479 | * blocked the queue so future requests should not |
1480 | * occur until the device transitions out of the |
1481 | * suspend state. |
1482 | */ |
1483 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, |
1484 | "queuecommand : device blocked\n" )); |
1485 | atomic_dec(v: &cmd->device->iorequest_cnt); |
1486 | return SCSI_MLQUEUE_DEVICE_BUSY; |
1487 | } |
1488 | |
1489 | /* Store the LUN value in cmnd, if needed. */ |
1490 | if (cmd->device->lun_in_cdb) |
1491 | cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | |
1492 | (cmd->device->lun << 5 & 0xe0); |
1493 | |
1494 | scsi_log_send(cmd); |
1495 | |
1496 | /* |
1497 | * Before we queue this command, check if the command |
1498 | * length exceeds what the host adapter can handle. |
1499 | */ |
1500 | if (cmd->cmd_len > cmd->device->host->max_cmd_len) { |
1501 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, |
1502 | "queuecommand : command too long. " |
1503 | "cdb_size=%d host->max_cmd_len=%d\n" , |
1504 | cmd->cmd_len, cmd->device->host->max_cmd_len)); |
1505 | cmd->result = (DID_ABORT << 16); |
1506 | goto done; |
1507 | } |
1508 | |
1509 | if (unlikely(host->shost_state == SHOST_DEL)) { |
1510 | cmd->result = (DID_NO_CONNECT << 16); |
1511 | goto done; |
1512 | |
1513 | } |
1514 | |
1515 | trace_scsi_dispatch_cmd_start(cmd); |
1516 | rtn = host->hostt->queuecommand(host, cmd); |
1517 | if (rtn) { |
1518 | atomic_dec(v: &cmd->device->iorequest_cnt); |
1519 | trace_scsi_dispatch_cmd_error(cmd, rtn); |
1520 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && |
1521 | rtn != SCSI_MLQUEUE_TARGET_BUSY) |
1522 | rtn = SCSI_MLQUEUE_HOST_BUSY; |
1523 | |
1524 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, |
1525 | "queuecommand : request rejected\n" )); |
1526 | } |
1527 | |
1528 | return rtn; |
1529 | done: |
1530 | scsi_done(cmd); |
1531 | return 0; |
1532 | } |
1533 | |
1534 | /* Size in bytes of the sg-list stored in the scsi-mq command-private data. */ |
1535 | static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost) |
1536 | { |
1537 | return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) * |
1538 | sizeof(struct scatterlist); |
1539 | } |
1540 | |
1541 | static blk_status_t scsi_prepare_cmd(struct request *req) |
1542 | { |
1543 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq: req); |
1544 | struct scsi_device *sdev = req->q->queuedata; |
1545 | struct Scsi_Host *shost = sdev->host; |
1546 | bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state); |
1547 | struct scatterlist *sg; |
1548 | |
1549 | scsi_init_command(dev: sdev, cmd); |
1550 | |
1551 | cmd->eh_eflags = 0; |
1552 | cmd->prot_type = 0; |
1553 | cmd->prot_flags = 0; |
1554 | cmd->submitter = 0; |
1555 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); |
1556 | cmd->underflow = 0; |
1557 | cmd->transfersize = 0; |
1558 | cmd->host_scribble = NULL; |
1559 | cmd->result = 0; |
1560 | cmd->extra_len = 0; |
1561 | cmd->state = 0; |
1562 | if (in_flight) |
1563 | __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); |
1564 | |
1565 | /* |
1566 | * Only clear the driver-private command data if the LLD does not supply |
1567 | * a function to initialize that data. |
1568 | */ |
1569 | if (!shost->hostt->init_cmd_priv) |
1570 | memset(cmd + 1, 0, shost->hostt->cmd_size); |
1571 | |
1572 | cmd->prot_op = SCSI_PROT_NORMAL; |
1573 | if (blk_rq_bytes(rq: req)) |
1574 | cmd->sc_data_direction = rq_dma_dir(req); |
1575 | else |
1576 | cmd->sc_data_direction = DMA_NONE; |
1577 | |
1578 | sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; |
1579 | cmd->sdb.table.sgl = sg; |
1580 | |
1581 | if (scsi_host_get_prot(shost)) { |
1582 | memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); |
1583 | |
1584 | cmd->prot_sdb->table.sgl = |
1585 | (struct scatterlist *)(cmd->prot_sdb + 1); |
1586 | } |
1587 | |
1588 | /* |
1589 | * Special handling for passthrough commands, which don't go to the ULP |
1590 | * at all: |
1591 | */ |
1592 | if (blk_rq_is_passthrough(rq: req)) |
1593 | return scsi_setup_scsi_cmnd(sdev, req); |
1594 | |
1595 | if (sdev->handler && sdev->handler->prep_fn) { |
1596 | blk_status_t ret = sdev->handler->prep_fn(sdev, req); |
1597 | |
1598 | if (ret != BLK_STS_OK) |
1599 | return ret; |
1600 | } |
1601 | |
1602 | /* Usually overridden by the ULP */ |
1603 | cmd->allowed = 0; |
1604 | memset(cmd->cmnd, 0, sizeof(cmd->cmnd)); |
1605 | return scsi_cmd_to_driver(cmd)->init_command(cmd); |
1606 | } |
1607 | |
1608 | static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly) |
1609 | { |
1610 | struct request *req = scsi_cmd_to_rq(scmd: cmd); |
1611 | |
1612 | switch (cmd->submitter) { |
1613 | case SUBMITTED_BY_BLOCK_LAYER: |
1614 | break; |
1615 | case SUBMITTED_BY_SCSI_ERROR_HANDLER: |
1616 | return scsi_eh_done(scmd: cmd); |
1617 | case SUBMITTED_BY_SCSI_RESET_IOCTL: |
1618 | return; |
1619 | } |
1620 | |
1621 | if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q))) |
1622 | return; |
1623 | if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) |
1624 | return; |
1625 | trace_scsi_dispatch_cmd_done(cmd); |
1626 | |
1627 | if (complete_directly) |
		blk_mq_complete_request_direct(req, scsi_complete);
1629 | else |
		blk_mq_complete_request(req);
1631 | } |
1632 | |
1633 | void scsi_done(struct scsi_cmnd *cmd) |
1634 | { |
	scsi_done_internal(cmd, false);
1636 | } |
1637 | EXPORT_SYMBOL(scsi_done); |
1638 | |
1639 | void scsi_done_direct(struct scsi_cmnd *cmd) |
1640 | { |
	scsi_done_internal(cmd, true);
1642 | } |
1643 | EXPORT_SYMBOL(scsi_done_direct); |
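
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * low-level driver typically calls scsi_done() from its completion
 * interrupt once the HBA has finished a command.  The foo_* names below
 * are hypothetical.
 */
#if 0
static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_host *foo = dev_id;
	struct scsi_cmnd *cmd = foo_pop_completed_cmd(foo);

	if (!cmd)
		return IRQ_NONE;

	cmd->result = DID_OK << 16;
	scsi_done(cmd);		/* hand the command back to the midlayer */
	return IRQ_HANDLED;
}
#endif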
1644 | |
1645 | static void scsi_mq_put_budget(struct request_queue *q, int budget_token) |
1646 | { |
1647 | struct scsi_device *sdev = q->queuedata; |
1648 | |
	sbitmap_put(&sdev->budget_map, budget_token);
1650 | } |
1651 | |
1652 | /* |
 * When to reinvoke queueing after a resource shortage. It's 3 msecs so as
 * not to change behaviour from the previous unplug mechanism; experimentation
 * may prove this needs changing.
1656 | */ |
1657 | #define SCSI_QUEUE_DELAY 3 |
1658 | |
1659 | static int scsi_mq_get_budget(struct request_queue *q) |
1660 | { |
1661 | struct scsi_device *sdev = q->queuedata; |
1662 | int token = scsi_dev_queue_ready(q, sdev); |
1663 | |
1664 | if (token >= 0) |
1665 | return token; |
1666 | |
	atomic_inc(&sdev->restarts);
1668 | |
1669 | /* |
1670 | * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy). |
1671 | * .restarts must be incremented before .device_busy is read because the |
1672 | * code in scsi_run_queue_async() depends on the order of these operations. |
1673 | */ |
1674 | smp_mb__after_atomic(); |
1675 | |
1676 | /* |
	 * If all in-flight requests originating from this LUN have completed
1678 | * before reading .device_busy, sdev->device_busy will be observed as |
1679 | * zero, then blk_mq_delay_run_hw_queues() will dispatch this request |
1680 | * soon. Otherwise, completion of one of these requests will observe |
1681 | * the .restarts flag, and the request queue will be run for handling |
1682 | * this request, see scsi_end_request(). |
1683 | */ |
1684 | if (unlikely(scsi_device_busy(sdev) == 0 && |
1685 | !scsi_device_blocked(sdev))) |
		blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
1687 | return -1; |
1688 | } |
1689 | |
1690 | static void scsi_mq_set_rq_budget_token(struct request *req, int token) |
1691 | { |
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1693 | |
1694 | cmd->budget_token = token; |
1695 | } |
1696 | |
1697 | static int scsi_mq_get_rq_budget_token(struct request *req) |
1698 | { |
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1700 | |
1701 | return cmd->budget_token; |
1702 | } |
1703 | |
1704 | static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, |
1705 | const struct blk_mq_queue_data *bd) |
1706 | { |
1707 | struct request *req = bd->rq; |
1708 | struct request_queue *q = req->q; |
1709 | struct scsi_device *sdev = q->queuedata; |
1710 | struct Scsi_Host *shost = sdev->host; |
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1712 | blk_status_t ret; |
1713 | int reason; |
1714 | |
1715 | WARN_ON_ONCE(cmd->budget_token < 0); |
1716 | |
1717 | /* |
1718 | * If the device is not in running state we will reject some or all |
1719 | * commands. |
1720 | */ |
1721 | if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { |
1722 | ret = scsi_device_state_check(sdev, req); |
1723 | if (ret != BLK_STS_OK) |
1724 | goto out_put_budget; |
1725 | } |
1726 | |
1727 | ret = BLK_STS_RESOURCE; |
1728 | if (!scsi_target_queue_ready(shost, sdev)) |
1729 | goto out_put_budget; |
1730 | if (unlikely(scsi_host_in_recovery(shost))) { |
1731 | if (cmd->flags & SCMD_FAIL_IF_RECOVERING) |
1732 | ret = BLK_STS_OFFLINE; |
1733 | goto out_dec_target_busy; |
1734 | } |
1735 | if (!scsi_host_queue_ready(q, shost, sdev, cmd)) |
1736 | goto out_dec_target_busy; |
1737 | |
1738 | if (!(req->rq_flags & RQF_DONTPREP)) { |
1739 | ret = scsi_prepare_cmd(req); |
1740 | if (ret != BLK_STS_OK) |
1741 | goto out_dec_host_busy; |
1742 | req->rq_flags |= RQF_DONTPREP; |
1743 | } else { |
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
1745 | } |
1746 | |
1747 | cmd->flags &= SCMD_PRESERVED_FLAGS; |
1748 | if (sdev->simple_tags) |
1749 | cmd->flags |= SCMD_TAGGED; |
1750 | if (bd->last) |
1751 | cmd->flags |= SCMD_LAST; |
1752 | |
	scsi_set_resid(cmd, 0);
1754 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); |
1755 | cmd->submitter = SUBMITTED_BY_BLOCK_LAYER; |
1756 | |
	blk_mq_start_request(req);
1758 | reason = scsi_dispatch_cmd(cmd); |
1759 | if (reason) { |
1760 | scsi_set_blocked(cmd, reason); |
1761 | ret = BLK_STS_RESOURCE; |
1762 | goto out_dec_host_busy; |
1763 | } |
1764 | |
1765 | return BLK_STS_OK; |
1766 | |
1767 | out_dec_host_busy: |
1768 | scsi_dec_host_busy(shost, cmd); |
1769 | out_dec_target_busy: |
1770 | if (scsi_target(sdev)->can_queue > 0) |
		atomic_dec(&scsi_target(sdev)->target_busy);
1772 | out_put_budget: |
	scsi_mq_put_budget(q, cmd->budget_token);
1774 | cmd->budget_token = -1; |
1775 | switch (ret) { |
1776 | case BLK_STS_OK: |
1777 | break; |
1778 | case BLK_STS_RESOURCE: |
1779 | case BLK_STS_ZONE_RESOURCE: |
1780 | if (scsi_device_blocked(sdev)) |
1781 | ret = BLK_STS_DEV_RESOURCE; |
1782 | break; |
1783 | case BLK_STS_AGAIN: |
1784 | cmd->result = DID_BUS_BUSY << 16; |
1785 | if (req->rq_flags & RQF_DONTPREP) |
1786 | scsi_mq_uninit_cmd(cmd); |
1787 | break; |
1788 | default: |
1789 | if (unlikely(!scsi_device_online(sdev))) |
1790 | cmd->result = DID_NO_CONNECT << 16; |
1791 | else |
1792 | cmd->result = DID_ERROR << 16; |
1793 | /* |
1794 | * Make sure to release all allocated resources when |
1795 | * we hit an error, as we will never see this command |
1796 | * again. |
1797 | */ |
1798 | if (req->rq_flags & RQF_DONTPREP) |
1799 | scsi_mq_uninit_cmd(cmd); |
1800 | scsi_run_queue_async(sdev); |
1801 | break; |
1802 | } |
1803 | return ret; |
1804 | } |
1805 | |
1806 | static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, |
1807 | unsigned int hctx_idx, unsigned int numa_node) |
1808 | { |
1809 | struct Scsi_Host *shost = set->driver_data; |
1810 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1811 | struct scatterlist *sg; |
1812 | int ret = 0; |
1813 | |
1814 | cmd->sense_buffer = |
		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
1816 | if (!cmd->sense_buffer) |
1817 | return -ENOMEM; |
1818 | |
1819 | if (scsi_host_get_prot(shost)) { |
1820 | sg = (void *)cmd + sizeof(struct scsi_cmnd) + |
1821 | shost->hostt->cmd_size; |
1822 | cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost); |
1823 | } |
1824 | |
1825 | if (shost->hostt->init_cmd_priv) { |
1826 | ret = shost->hostt->init_cmd_priv(shost, cmd); |
1827 | if (ret < 0) |
			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1829 | } |
1830 | |
1831 | return ret; |
1832 | } |
1833 | |
1834 | static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, |
1835 | unsigned int hctx_idx) |
1836 | { |
1837 | struct Scsi_Host *shost = set->driver_data; |
1838 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1839 | |
1840 | if (shost->hostt->exit_cmd_priv) |
1841 | shost->hostt->exit_cmd_priv(shost, cmd); |
	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1843 | } |
1844 | |
1845 | |
1846 | static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) |
1847 | { |
1848 | struct Scsi_Host *shost = hctx->driver_data; |
1849 | |
1850 | if (shost->hostt->mq_poll) |
1851 | return shost->hostt->mq_poll(shost, hctx->queue_num); |
1852 | |
1853 | return 0; |
1854 | } |
1855 | |
1856 | static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, |
1857 | unsigned int hctx_idx) |
1858 | { |
1859 | struct Scsi_Host *shost = data; |
1860 | |
1861 | hctx->driver_data = shost; |
1862 | return 0; |
1863 | } |
1864 | |
1865 | static void scsi_map_queues(struct blk_mq_tag_set *set) |
1866 | { |
1867 | struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); |
1868 | |
1869 | if (shost->hostt->map_queues) |
1870 | return shost->hostt->map_queues(shost); |
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
1872 | } |
1873 | |
1874 | void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) |
1875 | { |
1876 | struct device *dev = shost->dma_dev; |
1877 | |
1878 | /* |
1879 | * this limit is imposed by hardware restrictions |
1880 | */ |
1881 | blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, |
1882 | SG_MAX_SEGMENTS)); |
1883 | |
1884 | if (scsi_host_prot_dma(shost)) { |
1885 | shost->sg_prot_tablesize = |
1886 | min_not_zero(shost->sg_prot_tablesize, |
1887 | (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); |
1888 | BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); |
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1890 | } |
1891 | |
1892 | blk_queue_max_hw_sectors(q, shost->max_sectors); |
1893 | blk_queue_segment_boundary(q, shost->dma_boundary); |
	dma_set_seg_boundary(dev, shost->dma_boundary);
1895 | |
1896 | blk_queue_max_segment_size(q, shost->max_segment_size); |
1897 | blk_queue_virt_boundary(q, shost->virt_boundary_mask); |
	dma_set_max_seg_size(dev, queue_max_segment_size(q));
1899 | |
1900 | /* |
	 * Set a reasonable default alignment: the larger of 4 bytes (32-bit dword),
1902 | * which is a common minimum for HBAs, and the minimum DMA alignment, |
1903 | * which is set by the platform. |
1904 | * |
1905 | * Devices that require a bigger alignment can increase it later. |
1906 | */ |
1907 | blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); |
1908 | } |
1909 | EXPORT_SYMBOL_GPL(__scsi_init_queue); |
1910 | |
1911 | static const struct blk_mq_ops scsi_mq_ops_no_commit = { |
1912 | .get_budget = scsi_mq_get_budget, |
1913 | .put_budget = scsi_mq_put_budget, |
1914 | .queue_rq = scsi_queue_rq, |
1915 | .complete = scsi_complete, |
1916 | .timeout = scsi_timeout, |
1917 | #ifdef CONFIG_BLK_DEBUG_FS |
1918 | .show_rq = scsi_show_rq, |
1919 | #endif |
1920 | .init_request = scsi_mq_init_request, |
1921 | .exit_request = scsi_mq_exit_request, |
1922 | .cleanup_rq = scsi_cleanup_rq, |
1923 | .busy = scsi_mq_lld_busy, |
1924 | .map_queues = scsi_map_queues, |
1925 | .init_hctx = scsi_init_hctx, |
1926 | .poll = scsi_mq_poll, |
1927 | .set_rq_budget_token = scsi_mq_set_rq_budget_token, |
1928 | .get_rq_budget_token = scsi_mq_get_rq_budget_token, |
1929 | }; |
1930 | |
1931 | |
1932 | static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx) |
1933 | { |
1934 | struct Scsi_Host *shost = hctx->driver_data; |
1935 | |
1936 | shost->hostt->commit_rqs(shost, hctx->queue_num); |
1937 | } |
1938 | |
1939 | static const struct blk_mq_ops scsi_mq_ops = { |
1940 | .get_budget = scsi_mq_get_budget, |
1941 | .put_budget = scsi_mq_put_budget, |
1942 | .queue_rq = scsi_queue_rq, |
1943 | .commit_rqs = scsi_commit_rqs, |
1944 | .complete = scsi_complete, |
1945 | .timeout = scsi_timeout, |
1946 | #ifdef CONFIG_BLK_DEBUG_FS |
1947 | .show_rq = scsi_show_rq, |
1948 | #endif |
1949 | .init_request = scsi_mq_init_request, |
1950 | .exit_request = scsi_mq_exit_request, |
1951 | .cleanup_rq = scsi_cleanup_rq, |
1952 | .busy = scsi_mq_lld_busy, |
1953 | .map_queues = scsi_map_queues, |
1954 | .init_hctx = scsi_init_hctx, |
1955 | .poll = scsi_mq_poll, |
1956 | .set_rq_budget_token = scsi_mq_set_rq_budget_token, |
1957 | .get_rq_budget_token = scsi_mq_get_rq_budget_token, |
1958 | }; |
1959 | |
1960 | int scsi_mq_setup_tags(struct Scsi_Host *shost) |
1961 | { |
1962 | unsigned int cmd_size, sgl_size; |
1963 | struct blk_mq_tag_set *tag_set = &shost->tag_set; |
1964 | |
1965 | sgl_size = max_t(unsigned int, sizeof(struct scatterlist), |
1966 | scsi_mq_inline_sgl_size(shost)); |
1967 | cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; |
1968 | if (scsi_host_get_prot(shost)) |
1969 | cmd_size += sizeof(struct scsi_data_buffer) + |
1970 | sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT; |
1971 | |
1972 | memset(tag_set, 0, sizeof(*tag_set)); |
1973 | if (shost->hostt->commit_rqs) |
1974 | tag_set->ops = &scsi_mq_ops; |
1975 | else |
1976 | tag_set->ops = &scsi_mq_ops_no_commit; |
1977 | tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1; |
1978 | tag_set->nr_maps = shost->nr_maps ? : 1; |
1979 | tag_set->queue_depth = shost->can_queue; |
1980 | tag_set->cmd_size = cmd_size; |
	tag_set->numa_node = dev_to_node(shost->dma_dev);
1982 | tag_set->flags = BLK_MQ_F_SHOULD_MERGE; |
1983 | tag_set->flags |= |
1984 | BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); |
1985 | if (shost->queuecommand_may_block) |
1986 | tag_set->flags |= BLK_MQ_F_BLOCKING; |
1987 | tag_set->driver_data = shost; |
1988 | if (shost->host_tagset) |
1989 | tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; |
1990 | |
	return blk_mq_alloc_tag_set(tag_set);
1992 | } |
1993 | |
1994 | void scsi_mq_free_tags(struct kref *kref) |
1995 | { |
1996 | struct Scsi_Host *shost = container_of(kref, typeof(*shost), |
1997 | tagset_refcnt); |
1998 | |
	blk_mq_free_tag_set(&shost->tag_set);
2000 | complete(&shost->tagset_freed); |
2001 | } |
2002 | |
2003 | /** |
2004 | * scsi_device_from_queue - return sdev associated with a request_queue |
2005 | * @q: The request queue to return the sdev from |
2006 | * |
2007 | * Return the sdev associated with a request queue or NULL if the |
2008 | * request_queue does not reference a SCSI device. |
2009 | */ |
2010 | struct scsi_device *scsi_device_from_queue(struct request_queue *q) |
2011 | { |
2012 | struct scsi_device *sdev = NULL; |
2013 | |
2014 | if (q->mq_ops == &scsi_mq_ops_no_commit || |
2015 | q->mq_ops == &scsi_mq_ops) |
2016 | sdev = q->queuedata; |
	if (!sdev || !get_device(&sdev->sdev_gendev))
2018 | sdev = NULL; |
2019 | |
2020 | return sdev; |
2021 | } |
2022 | /* |
2023 | * pktcdvd should have been integrated into the SCSI layers, but for historical |
2024 | * reasons like the old IDE driver it isn't. This export allows it to safely |
2025 | * probe if a given device is a SCSI one and only attach to that. |
2026 | */ |
2027 | #ifdef CONFIG_CDROM_PKTCDVD_MODULE |
2028 | EXPORT_SYMBOL_GPL(scsi_device_from_queue); |
2029 | #endif |
2030 | |
2031 | /** |
2032 | * scsi_block_requests - Utility function used by low-level drivers to prevent |
2033 | * further commands from being queued to the device. |
2034 | * @shost: host in question |
2035 | * |
2036 | * There is no timer nor any other means by which the requests get unblocked |
2037 | * other than the low-level driver calling scsi_unblock_requests(). |
2038 | */ |
2039 | void scsi_block_requests(struct Scsi_Host *shost) |
2040 | { |
2041 | shost->host_self_blocked = 1; |
2042 | } |
2043 | EXPORT_SYMBOL(scsi_block_requests); |
2044 | |
2045 | /** |
2046 | * scsi_unblock_requests - Utility function used by low-level drivers to allow |
2047 | * further commands to be queued to the device. |
2048 | * @shost: host in question |
2049 | * |
2050 | * There is no timer nor any other means by which the requests get unblocked |
2051 | * other than the low-level driver calling scsi_unblock_requests(). This is done |
2052 | * as an API function so that changes to the internals of the scsi mid-layer |
2053 | * won't require wholesale changes to drivers that use this feature. |
2054 | */ |
2055 | void scsi_unblock_requests(struct Scsi_Host *shost) |
2056 | { |
2057 | shost->host_self_blocked = 0; |
2058 | scsi_run_host_queues(shost); |
2059 | } |
2060 | EXPORT_SYMBOL(scsi_unblock_requests); |
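
/*
 * Usage sketch (illustrative only, not part of the original file): an LLD
 * can bracket an internal controller reset with scsi_block_requests()/
 * scsi_unblock_requests() so that no new commands reach its queuecommand
 * callback while the hardware is reinitialized.  foo_reset_controller()
 * is hypothetical.
 */
#if 0
static void foo_reset(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);
	foo_reset_controller(shost);	/* hypothetical LLD helper */
	scsi_unblock_requests(shost);
}
#endif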
2061 | |
2062 | void scsi_exit_queue(void) |
2063 | { |
	kmem_cache_destroy(scsi_sense_cache);
2065 | } |
2066 | |
2067 | /** |
2068 | * scsi_mode_select - issue a mode select |
2069 | * @sdev: SCSI device to be queried |
2070 | * @pf: Page format bit (1 == standard, 0 == vendor specific) |
2071 | * @sp: Save page bit (0 == don't save, 1 == save) |
2072 | * @buffer: request buffer (may not be smaller than eight bytes) |
2073 | * @len: length of request buffer. |
2074 | * @timeout: command timeout |
2075 | * @retries: number of retries before failing |
2076 | * @data: returns a structure abstracting the mode header data |
2077 | * @sshdr: place to put sense data (or NULL if no sense to be collected). |
2078 | * must be SCSI_SENSE_BUFFERSIZE big. |
2079 | * |
2080 | * Returns zero if successful; negative error number or scsi |
2081 | * status on error |
2082 | * |
2083 | */ |
2084 | int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, |
2085 | unsigned char *buffer, int len, int timeout, int retries, |
2086 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) |
2087 | { |
2088 | unsigned char cmd[10]; |
2089 | unsigned char *real_buffer; |
2090 | const struct scsi_exec_args exec_args = { |
2091 | .sshdr = sshdr, |
2092 | }; |
2093 | int ret; |
2094 | |
2095 | memset(cmd, 0, sizeof(cmd)); |
2096 | cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); |
2097 | |
2098 | /* |
2099 | * Use MODE SELECT(10) if the device asked for it or if the mode page |
	 * and the mode select header cannot fit within the maximum 255 bytes
2101 | * of the MODE SELECT(6) command. |
2102 | */ |
2103 | if (sdev->use_10_for_ms || |
2104 | len + 4 > 255 || |
2105 | data->block_descriptor_length > 255) { |
2106 | if (len > 65535 - 8) |
2107 | return -EINVAL; |
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
2109 | if (!real_buffer) |
2110 | return -ENOMEM; |
2111 | memcpy(real_buffer + 8, buffer, len); |
2112 | len += 8; |
2113 | real_buffer[0] = 0; |
2114 | real_buffer[1] = 0; |
2115 | real_buffer[2] = data->medium_type; |
2116 | real_buffer[3] = data->device_specific; |
2117 | real_buffer[4] = data->longlba ? 0x01 : 0; |
2118 | real_buffer[5] = 0; |
		put_unaligned_be16(data->block_descriptor_length,
				   &real_buffer[6]);
2121 | |
2122 | cmd[0] = MODE_SELECT_10; |
		put_unaligned_be16(len, &cmd[7]);
2124 | } else { |
2125 | if (data->longlba) |
2126 | return -EINVAL; |
2127 | |
		real_buffer = kmalloc(4 + len, GFP_KERNEL);
2129 | if (!real_buffer) |
2130 | return -ENOMEM; |
2131 | memcpy(real_buffer + 4, buffer, len); |
2132 | len += 4; |
2133 | real_buffer[0] = 0; |
2134 | real_buffer[1] = data->medium_type; |
2135 | real_buffer[2] = data->device_specific; |
2136 | real_buffer[3] = data->block_descriptor_length; |
2137 | |
2138 | cmd[0] = MODE_SELECT; |
2139 | cmd[4] = len; |
2140 | } |
2141 | |
2142 | ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len, |
2143 | timeout, retries, &exec_args); |
	kfree(real_buffer);
2145 | return ret; |
2146 | } |
2147 | EXPORT_SYMBOL_GPL(scsi_mode_select); |
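
/*
 * Usage sketch (illustrative only, not part of the original file): writing
 * back a previously read mode page with the save-pages bit set.
 * foo_write_cache_page() and its parameters are hypothetical; the timeout
 * and retry count are arbitrary values chosen for the sketch.
 */
#if 0
static int foo_write_cache_page(struct scsi_device *sdev,
				unsigned char *page, int page_len,
				struct scsi_mode_data *data)
{
	struct scsi_sense_hdr sshdr;

	return scsi_mode_select(sdev, 1 /* pf */, 1 /* sp */, page, page_len,
				30 * HZ, 3, data, &sshdr);
}
#endif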
2148 | |
2149 | /** |
 *	scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
2151 | * @sdev: SCSI device to be queried |
2152 | * @dbd: set to prevent mode sense from returning block descriptors |
2153 | * @modepage: mode page being requested |
2154 | * @subpage: sub-page of the mode page being requested |
2155 | * @buffer: request buffer (may not be smaller than eight bytes) |
2156 | * @len: length of request buffer. |
2157 | * @timeout: command timeout |
2158 | * @retries: number of retries before failing |
2159 | * @data: returns a structure abstracting the mode header data |
2160 | * @sshdr: place to put sense data (or NULL if no sense to be collected). |
2161 | * must be SCSI_SENSE_BUFFERSIZE big. |
2162 | * |
2163 | * Returns zero if successful, or a negative error number on failure |
2164 | */ |
2165 | int |
2166 | scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage, |
2167 | unsigned char *buffer, int len, int timeout, int retries, |
2168 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) |
2169 | { |
2170 | unsigned char cmd[12]; |
2171 | int use_10_for_ms; |
	int header_length;
2173 | int result, retry_count = retries; |
2174 | struct scsi_sense_hdr my_sshdr; |
2175 | const struct scsi_exec_args exec_args = { |
2176 | /* caller might not be interested in sense, but we need it */ |
2177 | .sshdr = sshdr ? : &my_sshdr, |
2178 | }; |
2179 | |
2180 | memset(data, 0, sizeof(*data)); |
2181 | memset(&cmd[0], 0, 12); |
2182 | |
2183 | dbd = sdev->set_dbd_for_ms ? 8 : dbd; |
2184 | cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ |
2185 | cmd[2] = modepage; |
2186 | cmd[3] = subpage; |
2187 | |
2188 | sshdr = exec_args.sshdr; |
2189 | |
2190 | retry: |
2191 | use_10_for_ms = sdev->use_10_for_ms || len > 255; |
2192 | |
2193 | if (use_10_for_ms) { |
2194 | if (len < 8 || len > 65535) |
2195 | return -EINVAL; |
2196 | |
2197 | cmd[0] = MODE_SENSE_10; |
		put_unaligned_be16(len, &cmd[7]);
2199 | header_length = 8; |
2200 | } else { |
2201 | if (len < 4) |
2202 | return -EINVAL; |
2203 | |
2204 | cmd[0] = MODE_SENSE; |
2205 | cmd[4] = len; |
2206 | header_length = 4; |
2207 | } |
2208 | |
2209 | memset(buffer, 0, len); |
2210 | |
2211 | result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, |
2212 | timeout, retries, &exec_args); |
2213 | if (result < 0) |
2214 | return result; |
2215 | |
2216 | /* This code looks awful: what it's doing is making sure an |
2217 | * ILLEGAL REQUEST sense return identifies the actual command |
2218 | * byte as the problem. MODE_SENSE commands can return |
2219 | * ILLEGAL REQUEST if the code page isn't supported */ |
2220 | |
	if (!scsi_status_is_good(result)) {
2222 | if (scsi_sense_valid(sshdr)) { |
2223 | if ((sshdr->sense_key == ILLEGAL_REQUEST) && |
2224 | (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { |
2225 | /* |
2226 | * Invalid command operation code: retry using |
2227 | * MODE SENSE(6) if this was a MODE SENSE(10) |
2228 | * request, except if the request mode page is |
2229 | * too large for MODE SENSE single byte |
2230 | * allocation length field. |
2231 | */ |
2232 | if (use_10_for_ms) { |
2233 | if (len > 255) |
2234 | return -EIO; |
2235 | sdev->use_10_for_ms = 0; |
2236 | goto retry; |
2237 | } |
2238 | } |
			if (scsi_status_is_check_condition(result) &&
2240 | sshdr->sense_key == UNIT_ATTENTION && |
2241 | retry_count) { |
2242 | retry_count--; |
2243 | goto retry; |
2244 | } |
2245 | } |
2246 | return -EIO; |
2247 | } |
2248 | if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && |
2249 | (modepage == 6 || modepage == 8))) { |
2250 | /* Initio breakage? */ |
2251 | header_length = 0; |
2252 | data->length = 13; |
2253 | data->medium_type = 0; |
2254 | data->device_specific = 0; |
2255 | data->longlba = 0; |
2256 | data->block_descriptor_length = 0; |
2257 | } else if (use_10_for_ms) { |
		data->length = get_unaligned_be16(&buffer[0]) + 2;
2259 | data->medium_type = buffer[2]; |
2260 | data->device_specific = buffer[3]; |
2261 | data->longlba = buffer[4] & 0x01; |
		data->block_descriptor_length = get_unaligned_be16(&buffer[6]);
2263 | } else { |
2264 | data->length = buffer[0] + 1; |
2265 | data->medium_type = buffer[1]; |
2266 | data->device_specific = buffer[2]; |
2267 | data->block_descriptor_length = buffer[3]; |
2268 | } |
2269 | data->header_length = header_length; |
2270 | |
2271 | return 0; |
2272 | } |
2273 | EXPORT_SYMBOL(scsi_mode_sense); |
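
/*
 * Usage sketch (illustrative only, not part of the original file): fetching
 * the caching mode page (0x08) the way a ULD might.  foo_read_cache_page()
 * is hypothetical; the buffer size, timeout and retry count are arbitrary.
 */
#if 0
static int foo_read_cache_page(struct scsi_device *sdev)
{
	unsigned char buffer[128];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;

	return scsi_mode_sense(sdev, 0 /* dbd */, 0x08, 0 /* subpage */,
			       buffer, sizeof(buffer), 30 * HZ, 3, &data,
			       &sshdr);
}
#endif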
2274 | |
2275 | /** |
2276 | * scsi_test_unit_ready - test if unit is ready |
2277 | * @sdev: scsi device to change the state of. |
2278 | * @timeout: command timeout |
2279 | * @retries: number of retries before failing |
 * @sshdr: output pointer for decoded sense information.
 *
 * Returns zero if successful, or an error if TUR failed.  For
2283 | * removable media, UNIT_ATTENTION sets ->changed flag. |
2284 | **/ |
2285 | int |
2286 | scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, |
2287 | struct scsi_sense_hdr *sshdr) |
2288 | { |
2289 | char cmd[] = { |
2290 | TEST_UNIT_READY, 0, 0, 0, 0, 0, |
2291 | }; |
2292 | const struct scsi_exec_args exec_args = { |
2293 | .sshdr = sshdr, |
2294 | }; |
2295 | int result; |
2296 | |
2297 | /* try to eat the UNIT_ATTENTION if there are enough retries */ |
2298 | do { |
2299 | result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, |
2300 | timeout, 1, &exec_args); |
2301 | if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) && |
2302 | sshdr->sense_key == UNIT_ATTENTION) |
2303 | sdev->changed = 1; |
2304 | } while (result > 0 && scsi_sense_valid(sshdr) && |
2305 | sshdr->sense_key == UNIT_ATTENTION && --retries); |
2306 | |
2307 | return result; |
2308 | } |
2309 | EXPORT_SYMBOL(scsi_test_unit_ready); |
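
/*
 * Usage sketch (illustrative only, not part of the original file): checking
 * whether removable media is present and ready before starting I/O.
 * foo_media_ready() is hypothetical; the timeout and retry count are
 * arbitrary.
 */
#if 0
static bool foo_media_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	return scsi_test_unit_ready(sdev, 10 * HZ, 3, &sshdr) == 0;
}
#endif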
2310 | |
2311 | /** |
2312 | * scsi_device_set_state - Take the given device through the device state model. |
2313 | * @sdev: scsi device to change the state of. |
2314 | * @state: state to change to. |
2315 | * |
2316 | * Returns zero if successful or an error if the requested |
2317 | * transition is illegal. |
2318 | */ |
2319 | int |
2320 | scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) |
2321 | { |
2322 | enum scsi_device_state oldstate = sdev->sdev_state; |
2323 | |
2324 | if (state == oldstate) |
2325 | return 0; |
2326 | |
2327 | switch (state) { |
2328 | case SDEV_CREATED: |
2329 | switch (oldstate) { |
2330 | case SDEV_CREATED_BLOCK: |
2331 | break; |
2332 | default: |
2333 | goto illegal; |
2334 | } |
2335 | break; |
2336 | |
2337 | case SDEV_RUNNING: |
2338 | switch (oldstate) { |
2339 | case SDEV_CREATED: |
2340 | case SDEV_OFFLINE: |
2341 | case SDEV_TRANSPORT_OFFLINE: |
2342 | case SDEV_QUIESCE: |
2343 | case SDEV_BLOCK: |
2344 | break; |
2345 | default: |
2346 | goto illegal; |
2347 | } |
2348 | break; |
2349 | |
2350 | case SDEV_QUIESCE: |
2351 | switch (oldstate) { |
2352 | case SDEV_RUNNING: |
2353 | case SDEV_OFFLINE: |
2354 | case SDEV_TRANSPORT_OFFLINE: |
2355 | break; |
2356 | default: |
2357 | goto illegal; |
2358 | } |
2359 | break; |
2360 | |
2361 | case SDEV_OFFLINE: |
2362 | case SDEV_TRANSPORT_OFFLINE: |
2363 | switch (oldstate) { |
2364 | case SDEV_CREATED: |
2365 | case SDEV_RUNNING: |
2366 | case SDEV_QUIESCE: |
2367 | case SDEV_BLOCK: |
2368 | break; |
2369 | default: |
2370 | goto illegal; |
2371 | } |
2372 | break; |
2373 | |
2374 | case SDEV_BLOCK: |
2375 | switch (oldstate) { |
2376 | case SDEV_RUNNING: |
2377 | case SDEV_CREATED_BLOCK: |
2378 | case SDEV_QUIESCE: |
2379 | case SDEV_OFFLINE: |
2380 | break; |
2381 | default: |
2382 | goto illegal; |
2383 | } |
2384 | break; |
2385 | |
2386 | case SDEV_CREATED_BLOCK: |
2387 | switch (oldstate) { |
2388 | case SDEV_CREATED: |
2389 | break; |
2390 | default: |
2391 | goto illegal; |
2392 | } |
2393 | break; |
2394 | |
2395 | case SDEV_CANCEL: |
2396 | switch (oldstate) { |
2397 | case SDEV_CREATED: |
2398 | case SDEV_RUNNING: |
2399 | case SDEV_QUIESCE: |
2400 | case SDEV_OFFLINE: |
2401 | case SDEV_TRANSPORT_OFFLINE: |
2402 | break; |
2403 | default: |
2404 | goto illegal; |
2405 | } |
2406 | break; |
2407 | |
2408 | case SDEV_DEL: |
2409 | switch (oldstate) { |
2410 | case SDEV_CREATED: |
2411 | case SDEV_RUNNING: |
2412 | case SDEV_OFFLINE: |
2413 | case SDEV_TRANSPORT_OFFLINE: |
2414 | case SDEV_CANCEL: |
2415 | case SDEV_BLOCK: |
2416 | case SDEV_CREATED_BLOCK: |
2417 | break; |
2418 | default: |
2419 | goto illegal; |
2420 | } |
2421 | break; |
2422 | |
2423 | } |
2424 | sdev->offline_already = false; |
2425 | sdev->sdev_state = state; |
2426 | return 0; |
2427 | |
2428 | illegal: |
2429 | SCSI_LOG_ERROR_RECOVERY(1, |
2430 | sdev_printk(KERN_ERR, sdev, |
2431 | "Illegal state transition %s->%s" , |
2432 | scsi_device_state_name(oldstate), |
2433 | scsi_device_state_name(state)) |
2434 | ); |
2435 | return -EINVAL; |
2436 | } |
2437 | EXPORT_SYMBOL(scsi_device_set_state); |
2438 | |
2439 | /** |
2440 | * scsi_evt_emit - emit a single SCSI device uevent |
2441 | * @sdev: associated SCSI device |
2442 | * @evt: event to emit |
2443 | * |
2444 | * Send a single uevent (scsi_event) to the associated scsi_device. |
2445 | */ |
2446 | static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) |
2447 | { |
2448 | int idx = 0; |
2449 | char *envp[3]; |
2450 | |
2451 | switch (evt->evt_type) { |
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		scsi_rescan_device(sdev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
		break;
2477 | default: |
2478 | /* do nothing */ |
2479 | break; |
2480 | } |
2481 | |
2482 | envp[idx++] = NULL; |
2483 | |
	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2485 | } |
2486 | |
2487 | /** |
2488 | * scsi_evt_thread - send a uevent for each scsi event |
2489 | * @work: work struct for scsi_device |
2490 | * |
2491 | * Dispatch queued events to their associated scsi_device kobjects |
2492 | * as uevents. |
2493 | */ |
2494 | void scsi_evt_thread(struct work_struct *work) |
2495 | { |
2496 | struct scsi_device *sdev; |
2497 | enum scsi_device_event evt_type; |
2498 | LIST_HEAD(event_list); |
2499 | |
2500 | sdev = container_of(work, struct scsi_device, event_work); |
2501 | |
2502 | for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) |
		if (test_and_clear_bit(evt_type, sdev->pending_events))
2504 | sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); |
2505 | |
2506 | while (1) { |
2507 | struct scsi_event *evt; |
2508 | struct list_head *this, *tmp; |
2509 | unsigned long flags; |
2510 | |
2511 | spin_lock_irqsave(&sdev->list_lock, flags); |
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
2516 | break; |
2517 | |
2518 | list_for_each_safe(this, tmp, &event_list) { |
2519 | evt = list_entry(this, struct scsi_event, node); |
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
2523 | } |
2524 | } |
2525 | } |
2526 | |
2527 | /** |
2528 | * sdev_evt_send - send asserted event to uevent thread |
2529 | * @sdev: scsi_device event occurred on |
2530 | * @evt: event to send |
2531 | * |
2532 | * Assert scsi device event asynchronously. |
2533 | */ |
2534 | void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) |
2535 | { |
2536 | unsigned long flags; |
2537 | |
2538 | #if 0 |
2539 | /* FIXME: currently this check eliminates all media change events |
2540 | * for polled devices. Need to update to discriminate between AN |
2541 | * and polled events */ |
2542 | if (!test_bit(evt->evt_type, sdev->supported_events)) { |
2543 | kfree(evt); |
2544 | return; |
2545 | } |
2546 | #endif |
2547 | |
2548 | spin_lock_irqsave(&sdev->list_lock, flags); |
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
2552 | } |
2553 | EXPORT_SYMBOL_GPL(sdev_evt_send); |
2554 | |
2555 | /** |
2556 | * sdev_evt_alloc - allocate a new scsi event |
2557 | * @evt_type: type of event to allocate |
2558 | * @gfpflags: GFP flags for allocation |
2559 | * |
2560 | * Allocates and returns a new scsi_event. |
2561 | */ |
2562 | struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, |
2563 | gfp_t gfpflags) |
2564 | { |
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2566 | if (!evt) |
2567 | return NULL; |
2568 | |
2569 | evt->evt_type = evt_type; |
	INIT_LIST_HEAD(&evt->node);
2571 | |
2572 | /* evt_type-specific initialization, if any */ |
2573 | switch (evt_type) { |
2574 | case SDEV_EVT_MEDIA_CHANGE: |
2575 | case SDEV_EVT_INQUIRY_CHANGE_REPORTED: |
2576 | case SDEV_EVT_CAPACITY_CHANGE_REPORTED: |
2577 | case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: |
2578 | case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: |
2579 | case SDEV_EVT_LUN_CHANGE_REPORTED: |
2580 | case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: |
2581 | case SDEV_EVT_POWER_ON_RESET_OCCURRED: |
2582 | default: |
2583 | /* do nothing */ |
2584 | break; |
2585 | } |
2586 | |
2587 | return evt; |
2588 | } |
2589 | EXPORT_SYMBOL_GPL(sdev_evt_alloc); |
2590 | |
2591 | /** |
2592 | * sdev_evt_send_simple - send asserted event to uevent thread |
2593 | * @sdev: scsi_device event occurred on |
2594 | * @evt_type: type of event to send |
2595 | * @gfpflags: GFP flags for allocation |
2596 | * |
2597 | * Assert scsi device event asynchronously, given an event type. |
2598 | */ |
2599 | void sdev_evt_send_simple(struct scsi_device *sdev, |
2600 | enum scsi_device_event evt_type, gfp_t gfpflags) |
2601 | { |
2602 | struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); |
2603 | if (!evt) { |
2604 | sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n" , |
2605 | evt_type); |
2606 | return; |
2607 | } |
2608 | |
2609 | sdev_evt_send(sdev, evt); |
2610 | } |
2611 | EXPORT_SYMBOL_GPL(sdev_evt_send_simple); |
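
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * that detects a changed medium can assert the corresponding uevent
 * asynchronously, even from atomic context.  foo_notify_media_change() is
 * hypothetical.
 */
#if 0
static void foo_notify_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}
#endif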
2612 | |
2613 | /** |
2614 | * scsi_device_quiesce - Block all commands except power management. |
2615 | * @sdev: scsi device to quiesce. |
2616 | * |
2617 | * This works by trying to transition to the SDEV_QUIESCE state |
2618 | * (which must be a legal transition). When the device is in this |
2619 | * state, only power management requests will be accepted, all others will |
2620 | * be deferred. |
2621 | * |
2622 | * Must be called with user context, may sleep. |
2623 | * |
 * Returns zero if successful or an error if not.
2625 | */ |
2626 | int |
2627 | scsi_device_quiesce(struct scsi_device *sdev) |
2628 | { |
2629 | struct request_queue *q = sdev->request_queue; |
2630 | int err; |
2631 | |
2632 | /* |
2633 | * It is allowed to call scsi_device_quiesce() multiple times from |
2634 | * the same context but concurrent scsi_device_quiesce() calls are |
2635 | * not allowed. |
2636 | */ |
2637 | WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current); |
2638 | |
2639 | if (sdev->quiesced_by == current) |
2640 | return 0; |
2641 | |
2642 | blk_set_pm_only(q); |
2643 | |
2644 | blk_mq_freeze_queue(q); |
2645 | /* |
2646 | * Ensure that the effect of blk_set_pm_only() will be visible |
2647 | * for percpu_ref_tryget() callers that occur after the queue |
2648 | * unfreeze even if the queue was already frozen before this function |
2649 | * was called. See also https://lwn.net/Articles/573497/. |
2650 | */ |
2651 | synchronize_rcu(); |
2652 | blk_mq_unfreeze_queue(q); |
2653 | |
2654 | mutex_lock(&sdev->state_mutex); |
2655 | err = scsi_device_set_state(sdev, SDEV_QUIESCE); |
2656 | if (err == 0) |
2657 | sdev->quiesced_by = current; |
2658 | else |
2659 | blk_clear_pm_only(q); |
	mutex_unlock(&sdev->state_mutex);
2661 | |
2662 | return err; |
2663 | } |
2664 | EXPORT_SYMBOL(scsi_device_quiesce); |
2665 | |
2666 | /** |
2667 | * scsi_device_resume - Restart user issued commands to a quiesced device. |
2668 | * @sdev: scsi device to resume. |
2669 | * |
2670 | * Moves the device from quiesced back to running and restarts the |
2671 | * queues. |
2672 | * |
2673 | * Must be called with user context, may sleep. |
2674 | */ |
2675 | void scsi_device_resume(struct scsi_device *sdev) |
2676 | { |
2677 | /* check if the device state was mutated prior to resume, and if |
2678 | * so assume the state is being managed elsewhere (for example |
2679 | * device deleted during suspend) |
2680 | */ |
2681 | mutex_lock(&sdev->state_mutex); |
2682 | if (sdev->sdev_state == SDEV_QUIESCE) |
2683 | scsi_device_set_state(sdev, SDEV_RUNNING); |
2684 | if (sdev->quiesced_by) { |
2685 | sdev->quiesced_by = NULL; |
		blk_clear_pm_only(sdev->request_queue);
	}
	mutex_unlock(&sdev->state_mutex);
2689 | } |
2690 | EXPORT_SYMBOL(scsi_device_resume); |
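
/*
 * Usage sketch (illustrative only, not part of the original file): power
 * management code quiesces a device, performs the suspend work, and undoes
 * the quiesce if that work fails.  foo_suspend_hw() is hypothetical.
 */
#if 0
static int foo_suspend(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;

	err = foo_suspend_hw(sdev);	/* hypothetical LLD hook */
	if (err)
		scsi_device_resume(sdev);
	return err;
}
#endif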
2691 | |
2692 | static void |
2693 | device_quiesce_fn(struct scsi_device *sdev, void *data) |
2694 | { |
2695 | scsi_device_quiesce(sdev); |
2696 | } |
2697 | |
2698 | void |
2699 | scsi_target_quiesce(struct scsi_target *starget) |
2700 | { |
	starget_for_each_device(starget, NULL, device_quiesce_fn);
2702 | } |
2703 | EXPORT_SYMBOL(scsi_target_quiesce); |
2704 | |
2705 | static void |
2706 | device_resume_fn(struct scsi_device *sdev, void *data) |
2707 | { |
2708 | scsi_device_resume(sdev); |
2709 | } |
2710 | |
2711 | void |
2712 | scsi_target_resume(struct scsi_target *starget) |
2713 | { |
	starget_for_each_device(starget, NULL, device_resume_fn);
2715 | } |
2716 | EXPORT_SYMBOL(scsi_target_resume); |
2717 | |
2718 | static int __scsi_internal_device_block_nowait(struct scsi_device *sdev) |
2719 | { |
2720 | if (scsi_device_set_state(sdev, SDEV_BLOCK)) |
2721 | return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); |
2722 | |
2723 | return 0; |
2724 | } |
2725 | |
2726 | void scsi_start_queue(struct scsi_device *sdev) |
2727 | { |
2728 | if (cmpxchg(&sdev->queue_stopped, 1, 0)) |
		blk_mq_unquiesce_queue(sdev->request_queue);
2730 | } |
2731 | |
2732 | static void scsi_stop_queue(struct scsi_device *sdev) |
2733 | { |
2734 | /* |
	 * The atomic variable ->queue_stopped ensures that
	 * blk_mq_quiesce_queue*() is balanced with blk_mq_unquiesce_queue().
2737 | * |
2738 | * The caller needs to wait until quiesce is done. |
2739 | */ |
2740 | if (!cmpxchg(&sdev->queue_stopped, 0, 1)) |
		blk_mq_quiesce_queue_nowait(sdev->request_queue);
2742 | } |
2743 | |
2744 | /** |
2745 | * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state |
2746 | * @sdev: device to block |
2747 | * |
2748 | * Pause SCSI command processing on the specified device. Does not sleep. |
2749 | * |
2750 | * Returns zero if successful or a negative error code upon failure. |
2751 | * |
2752 | * Notes: |
2753 | * This routine transitions the device to the SDEV_BLOCK state (which must be |
2754 | * a legal transition). When the device is in this state, command processing |
2755 | * is paused until the device leaves the SDEV_BLOCK state. See also |
2756 | * scsi_internal_device_unblock_nowait(). |
2757 | */ |
2758 | int scsi_internal_device_block_nowait(struct scsi_device *sdev) |
2759 | { |
2760 | int ret = __scsi_internal_device_block_nowait(sdev); |
2761 | |
2762 | /* |
2763 | * The device has transitioned to SDEV_BLOCK. Stop the |
2764 | * block layer from calling the midlayer with this device's |
2765 | * request queue. |
2766 | */ |
2767 | if (!ret) |
2768 | scsi_stop_queue(sdev); |
2769 | return ret; |
2770 | } |
2771 | EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); |
2772 | |
2773 | /** |
2774 | * scsi_device_block - try to transition to the SDEV_BLOCK state |
2775 | * @sdev: device to block |
2776 | * @data: dummy argument, ignored |
2777 | * |
2778 | * Pause SCSI command processing on the specified device. Callers must wait |
2779 | * until all ongoing scsi_queue_rq() calls have finished after this function |
2780 | * returns. |
2781 | * |
2782 | * Note: |
2783 | * This routine transitions the device to the SDEV_BLOCK state (which must be |
2784 | * a legal transition). When the device is in this state, command processing |
2785 | * is paused until the device leaves the SDEV_BLOCK state. See also |
2786 | * scsi_internal_device_unblock(). |
2787 | */ |
2788 | static void scsi_device_block(struct scsi_device *sdev, void *data) |
2789 | { |
2790 | int err; |
2791 | enum scsi_device_state state; |
2792 | |
2793 | mutex_lock(&sdev->state_mutex); |
2794 | err = __scsi_internal_device_block_nowait(sdev); |
2795 | state = sdev->sdev_state; |
2796 | if (err == 0) |
2797 | /* |
2798 | * scsi_stop_queue() must be called with the state_mutex |
2799 | * held. Otherwise a simultaneous scsi_start_queue() call |
2800 | * might unquiesce the queue before we quiesce it. |
2801 | */ |
2802 | scsi_stop_queue(sdev); |
2803 | |
	mutex_unlock(&sdev->state_mutex);

	WARN_ONCE(err, "%s: failed to block %s in state %d\n",
2807 | __func__, dev_name(&sdev->sdev_gendev), state); |
2808 | } |
2809 | |
2810 | /** |
2811 | * scsi_internal_device_unblock_nowait - resume a device after a block request |
2812 | * @sdev: device to resume |
2813 | * @new_state: state to set the device to after unblocking |
2814 | * |
2815 | * Restart the device queue for a previously suspended SCSI device. Does not |
2816 | * sleep. |
2817 | * |
2818 | * Returns zero if successful or a negative error code upon failure. |
2819 | * |
2820 | * Notes: |
2821 | * This routine transitions the device to the SDEV_RUNNING state or to one of |
2822 | * the offline states (which must be a legal transition) allowing the midlayer |
2823 | * to goose the queue for this device. |
2824 | */ |
2825 | int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, |
2826 | enum scsi_device_state new_state) |
2827 | { |
2828 | switch (new_state) { |
2829 | case SDEV_RUNNING: |
2830 | case SDEV_TRANSPORT_OFFLINE: |
2831 | break; |
2832 | default: |
2833 | return -EINVAL; |
2834 | } |
2835 | |
2836 | /* |
2837 | * Try to transition the scsi device to SDEV_RUNNING or one of the |
2838 | * offlined states and goose the device queue if successful. |
2839 | */ |
2840 | switch (sdev->sdev_state) { |
2841 | case SDEV_BLOCK: |
2842 | case SDEV_TRANSPORT_OFFLINE: |
2843 | sdev->sdev_state = new_state; |
2844 | break; |
2845 | case SDEV_CREATED_BLOCK: |
2846 | if (new_state == SDEV_TRANSPORT_OFFLINE || |
2847 | new_state == SDEV_OFFLINE) |
2848 | sdev->sdev_state = new_state; |
2849 | else |
2850 | sdev->sdev_state = SDEV_CREATED; |
2851 | break; |
2852 | case SDEV_CANCEL: |
2853 | case SDEV_OFFLINE: |
2854 | break; |
2855 | default: |
2856 | return -EINVAL; |
2857 | } |
2858 | scsi_start_queue(sdev); |
2859 | |
2860 | return 0; |
2861 | } |
2862 | EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait); |
2863 | |
2864 | /** |
2865 | * scsi_internal_device_unblock - resume a device after a block request |
2866 | * @sdev: device to resume |
2867 | * @new_state: state to set the device to after unblocking |
2868 | * |
2869 | * Restart the device queue for a previously suspended SCSI device. May sleep. |
2870 | * |
2871 | * Returns zero if successful or a negative error code upon failure. |
2872 | * |
2873 | * Notes: |
2874 | * This routine transitions the device to the SDEV_RUNNING state or to one of |
2875 | * the offline states (which must be a legal transition) allowing the midlayer |
2876 | * to goose the queue for this device. |
2877 | */ |
2878 | static int scsi_internal_device_unblock(struct scsi_device *sdev, |
2879 | enum scsi_device_state new_state) |
2880 | { |
2881 | int ret; |
2882 | |
2883 | mutex_lock(&sdev->state_mutex); |
2884 | ret = scsi_internal_device_unblock_nowait(sdev, new_state); |
	mutex_unlock(&sdev->state_mutex);
2886 | |
2887 | return ret; |
2888 | } |
2889 | |
2890 | static int |
2891 | target_block(struct device *dev, void *data) |
2892 | { |
2893 | if (scsi_is_target_device(dev)) |
2894 | starget_for_each_device(to_scsi_target(dev), NULL, |
					scsi_device_block);
2896 | return 0; |
2897 | } |
2898 | |
2899 | /** |
2900 | * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state |
2901 | * @dev: a parent device of one or more scsi_target devices |
2902 | * @shost: the Scsi_Host to which this device belongs |
2903 | * |
2904 | * Iterate over all children of @dev, which should be scsi_target devices, |
2905 | * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for |
2906 | * ongoing scsi_queue_rq() calls to finish. May sleep. |
2907 | * |
2908 | * Note: |
2909 | * @dev must not itself be a scsi_target device. |
2910 | */ |
2911 | void |
2912 | scsi_block_targets(struct Scsi_Host *shost, struct device *dev) |
2913 | { |
2914 | WARN_ON_ONCE(scsi_is_target_device(dev)); |
	device_for_each_child(dev, NULL, target_block);
	blk_mq_wait_quiesce_done(&shost->tag_set);
2917 | } |
2918 | EXPORT_SYMBOL_GPL(scsi_block_targets); |
2919 | |
2920 | static void |
2921 | device_unblock(struct scsi_device *sdev, void *data) |
2922 | { |
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2924 | } |
2925 | |
2926 | static int |
2927 | target_unblock(struct device *dev, void *data) |
2928 | { |
2929 | if (scsi_is_target_device(dev)) |
2930 | starget_for_each_device(to_scsi_target(dev), data, |
					device_unblock);
2932 | return 0; |
2933 | } |
2934 | |
2935 | void |
2936 | scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) |
2937 | { |
2938 | if (scsi_is_target_device(dev)) |
2939 | starget_for_each_device(to_scsi_target(dev), &new_state, |
					device_unblock);
2941 | else |
		device_for_each_child(dev, &new_state, target_unblock);
2943 | } |
2944 | EXPORT_SYMBOL_GPL(scsi_target_unblock); |
2945 | |
2946 | /** |
2947 | * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state |
2948 | * @shost: device to block |
2949 | * |
2950 | * Pause SCSI command processing for all logical units associated with the SCSI |
2951 | * host and wait until pending scsi_queue_rq() calls have finished. |
2952 | * |
2953 | * Returns zero if successful or a negative error code upon failure. |
2954 | */ |
2955 | int |
2956 | scsi_host_block(struct Scsi_Host *shost) |
2957 | { |
2958 | struct scsi_device *sdev; |
2959 | int ret; |
2960 | |
2961 | /* |
2962 | * Call scsi_internal_device_block_nowait so we can avoid |
2963 | * calling synchronize_rcu() for each LUN. |
2964 | */ |
2965 | shost_for_each_device(sdev, shost) { |
2966 | mutex_lock(&sdev->state_mutex); |
2967 | ret = scsi_internal_device_block_nowait(sdev); |
		mutex_unlock(&sdev->state_mutex);
2969 | if (ret) { |
2970 | scsi_device_put(sdev); |
2971 | return ret; |
2972 | } |
2973 | } |
2974 | |
2975 | /* Wait for ongoing scsi_queue_rq() calls to finish. */ |
	blk_mq_wait_quiesce_done(&shost->tag_set);
2977 | |
2978 | return 0; |
2979 | } |
2980 | EXPORT_SYMBOL_GPL(scsi_host_block); |
2981 | |
2982 | int |
2983 | scsi_host_unblock(struct Scsi_Host *shost, int new_state) |
2984 | { |
2985 | struct scsi_device *sdev; |
2986 | int ret = 0; |
2987 | |
2988 | shost_for_each_device(sdev, shost) { |
2989 | ret = scsi_internal_device_unblock(sdev, new_state); |
2990 | if (ret) { |
2991 | scsi_device_put(sdev); |
2992 | break; |
2993 | } |
2994 | } |
2995 | return ret; |
2996 | } |
2997 | EXPORT_SYMBOL_GPL(scsi_host_unblock); |
2998 | |
2999 | /** |
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
3001 | * @sgl: scatter-gather list |
3002 | * @sg_count: number of segments in sg |
3003 | * @offset: offset in bytes into sg, on return offset into the mapped area |
3004 | * @len: bytes to map, on return number of bytes mapped |
3005 | * |
3006 | * Returns virtual address of the start of the mapped page |
3007 | */ |
3008 | void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, |
3009 | size_t *offset, size_t *len) |
3010 | { |
3011 | int i; |
3012 | size_t sg_len = 0, len_complete = 0; |
3013 | struct scatterlist *sg; |
3014 | struct page *page; |
3015 | |
3016 | WARN_ON(!irqs_disabled()); |
3017 | |
3018 | for_each_sg(sgl, sg, sg_count, i) { |
3019 | len_complete = sg_len; /* Complete sg-entries */ |
3020 | sg_len += sg->length; |
3021 | if (sg_len > *offset) |
3022 | break; |
3023 | } |
3024 | |
3025 | if (unlikely(i == sg_count)) { |
3026 | printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " |
3027 | "elements %d\n" , |
3028 | __func__, sg_len, *offset, sg_count); |
3029 | WARN_ON(1); |
3030 | return NULL; |
3031 | } |
3032 | |
3033 | /* Offset starting from the beginning of first page in this sg-entry */ |
3034 | *offset = *offset - len_complete + sg->offset; |
3035 | |
3036 | /* Assumption: contiguous pages can be accessed as "page + i" */ |
3037 | page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); |
3038 | *offset &= ~PAGE_MASK; |
3039 | |
3040 | /* Bytes in this sg-entry from *offset to the end of the page */ |
3041 | sg_len = PAGE_SIZE - *offset; |
3042 | if (*len > sg_len) |
3043 | *len = sg_len; |
3044 | |
3045 | return kmap_atomic(page); |
3046 | } |
3047 | EXPORT_SYMBOL(scsi_kmap_atomic_sg); |
3048 | |
3049 | /** |
3050 | * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg |
3051 | * @virt: virtual address to be unmapped |
3052 | */ |
3053 | void scsi_kunmap_atomic_sg(void *virt) |
3054 | { |
3055 | kunmap_atomic(virt); |
3056 | } |
3057 | EXPORT_SYMBOL(scsi_kunmap_atomic_sg); |
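
/*
 * Usage sketch (illustrative only, not part of the original file): copying
 * the first bytes of a command's data buffer out of its scatterlist with
 * the atomic kmap helpers; interrupts must already be disabled here.
 * foo_peek_data() is hypothetical.
 */
#if 0
static void foo_peek_data(struct scsi_cmnd *cmd, u8 *to, size_t n)
{
	size_t offset = 0, len = n;
	void *vaddr;

	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				    &offset, &len);
	if (!vaddr)
		return;

	memcpy(to, vaddr + offset, min(n, len));
	scsi_kunmap_atomic_sg(vaddr);
}
#endif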
3058 | |
3059 | void sdev_disable_disk_events(struct scsi_device *sdev) |
3060 | { |
	atomic_inc(&sdev->disk_events_disable_depth);
3062 | } |
3063 | EXPORT_SYMBOL(sdev_disable_disk_events); |
3064 | |
3065 | void sdev_enable_disk_events(struct scsi_device *sdev) |
3066 | { |
3067 | if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) |
3068 | return; |
	atomic_dec(&sdev->disk_events_disable_depth);
3070 | } |
3071 | EXPORT_SYMBOL(sdev_enable_disk_events); |
3072 | |
3073 | static unsigned char designator_prio(const unsigned char *d) |
3074 | { |
3075 | if (d[1] & 0x30) |
3076 | /* not associated with LUN */ |
3077 | return 0; |
3078 | |
3079 | if (d[3] == 0) |
3080 | /* invalid length */ |
3081 | return 0; |
3082 | |
3083 | /* |
3084 | * Order of preference for lun descriptor: |
3085 | * - SCSI name string |
3086 | * - NAA IEEE Registered Extended |
3087 | * - EUI-64 based 16-byte |
3088 | * - EUI-64 based 12-byte |
3089 | * - NAA IEEE Registered |
3090 | * - NAA IEEE Extended |
3091 | * - EUI-64 based 8-byte |
3092 | * - SCSI name string (truncated) |
3093 | * - T10 Vendor ID |
	 * as longer descriptors reduce the likelihood
3095 | * of identification clashes. |
3096 | */ |
3097 | |
3098 | switch (d[1] & 0xf) { |
3099 | case 8: |
3100 | /* SCSI name string, variable-length UTF-8 */ |
3101 | return 9; |
3102 | case 3: |
3103 | switch (d[4] >> 4) { |
3104 | case 6: |
3105 | /* NAA registered extended */ |
3106 | return 8; |
3107 | case 5: |
3108 | /* NAA registered */ |
3109 | return 5; |
3110 | case 4: |
3111 | /* NAA extended */ |
3112 | return 4; |
3113 | case 3: |
3114 | /* NAA locally assigned */ |
3115 | return 1; |
3116 | default: |
3117 | break; |
3118 | } |
3119 | break; |
3120 | case 2: |
3121 | switch (d[3]) { |
3122 | case 16: |
3123 | /* EUI64-based, 16 byte */ |
3124 | return 7; |
3125 | case 12: |
3126 | /* EUI64-based, 12 byte */ |
3127 | return 6; |
3128 | case 8: |
3129 | /* EUI64-based, 8 byte */ |
3130 | return 3; |
3131 | default: |
3132 | break; |
3133 | } |
3134 | break; |
3135 | case 1: |
3136 | /* T10 vendor ID */ |
3137 | return 1; |
3138 | default: |
3139 | break; |
3140 | } |
3141 | |
3142 | return 0; |
3143 | } |
3144 | |
3145 | /** |
3146 | * scsi_vpd_lun_id - return a unique device identification |
3147 | * @sdev: SCSI device |
3148 | * @id: buffer for the identification |
3149 | * @id_len: length of the buffer |
3150 | * |
3151 | * Copies a unique device identification into @id based |
3152 | * on the information in the VPD page 0x83 of the device. |
3153 | * The string will be formatted as a SCSI name string. |
3154 | * |
3155 | * Returns the length of the identification or error on failure. |
3156 | * If the identifier is longer than the supplied buffer the actual |
3157 | * identifier length is returned and the buffer is not zero-padded. |
3158 | */ |
3159 | int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) |
3160 | { |
3161 | u8 cur_id_prio = 0; |
3162 | u8 cur_id_size = 0; |
3163 | const unsigned char *d, *cur_id_str; |
3164 | const struct scsi_vpd *vpd_pg83; |
3165 | int id_size = -EINVAL; |
3166 | |
3167 | rcu_read_lock(); |
3168 | vpd_pg83 = rcu_dereference(sdev->vpd_pg83); |
3169 | if (!vpd_pg83) { |
3170 | rcu_read_unlock(); |
3171 | return -ENXIO; |
3172 | } |
3173 | |
3174 | /* The id string must be at least 20 bytes + terminating NULL byte */ |
3175 | if (id_len < 21) { |
3176 | rcu_read_unlock(); |
3177 | return -EINVAL; |
3178 | } |
3179 | |
3180 | memset(id, 0, id_len); |
3181 | for (d = vpd_pg83->data + 4; |
3182 | d < vpd_pg83->data + vpd_pg83->len; |
3183 | d += d[3] + 4) { |
3184 | u8 prio = designator_prio(d); |
3185 | |
3186 | if (prio == 0 || cur_id_prio > prio) |
3187 | continue; |
3188 | |
3189 | switch (d[1] & 0xf) { |
3190 | case 0x1: |
3191 | /* T10 Vendor ID */ |
3192 | if (cur_id_size > d[3]) |
3193 | break; |
3194 | cur_id_prio = prio; |
3195 | cur_id_size = d[3]; |
3196 | if (cur_id_size + 4 > id_len) |
3197 | cur_id_size = id_len - 4; |
3198 | cur_id_str = d + 4; |
			id_size = snprintf(id, id_len, "t10.%*pE",
3200 | cur_id_size, cur_id_str); |
3201 | break; |
3202 | case 0x2: |
3203 | /* EUI-64 */ |
3204 | cur_id_prio = prio; |
3205 | cur_id_size = d[3]; |
3206 | cur_id_str = d + 4; |
3207 | switch (cur_id_size) { |
3208 | case 8: |
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
3222 | break; |
3223 | default: |
3224 | break; |
3225 | } |
3226 | break; |
3227 | case 0x3: |
3228 | /* NAA */ |
3229 | cur_id_prio = prio; |
3230 | cur_id_size = d[3]; |
3231 | cur_id_str = d + 4; |
3232 | switch (cur_id_size) { |
3233 | case 8: |
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
3241 | cur_id_str); |
3242 | break; |
3243 | default: |
3244 | break; |
3245 | } |
3246 | break; |
3247 | case 0x8: |
3248 | /* SCSI name string */ |
3249 | if (cur_id_size > d[3]) |
3250 | break; |
3251 | /* Prefer others for truncated descriptor */ |
3252 | if (d[3] > id_len) { |
3253 | prio = 2; |
3254 | if (cur_id_prio > prio) |
3255 | break; |
3256 | } |
3257 | cur_id_prio = prio; |
3258 | cur_id_size = id_size = d[3]; |
3259 | cur_id_str = d + 4; |
3260 | if (cur_id_size >= id_len) |
3261 | cur_id_size = id_len - 1; |
3262 | memcpy(id, cur_id_str, cur_id_size); |
3263 | break; |
3264 | default: |
3265 | break; |
3266 | } |
3267 | } |
3268 | rcu_read_unlock(); |
3269 | |
3270 | return id_size; |
3271 | } |
3272 | EXPORT_SYMBOL(scsi_vpd_lun_id); |
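
/*
 * Usage sketch (illustrative only, not part of the original file): fetching
 * the designator-based unique ID of a LUN, e.g. for logging.
 * foo_log_lun_id() is hypothetical; the 64-byte buffer is an arbitrary
 * size above the required minimum of 21 bytes.
 */
#if 0
static void foo_log_lun_id(struct scsi_device *sdev)
{
	char id[64];

	if (scsi_vpd_lun_id(sdev, id, sizeof(id)) > 0)
		sdev_printk(KERN_INFO, sdev, "unique id: %s\n", id);
}
#endif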
3273 | |
3274 | /* |
3275 | * scsi_vpd_tpg_id - return a target port group identifier |
3276 | * @sdev: SCSI device |
3277 | * |
3278 | * Returns the Target Port Group identifier from the information |
 * in VPD page 0x83 of the device.
3280 | * |
3281 | * Returns the identifier or error on failure. |
3282 | */ |
3283 | int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) |
3284 | { |
3285 | const unsigned char *d; |
3286 | const struct scsi_vpd *vpd_pg83; |
3287 | int group_id = -EAGAIN, rel_port = -1; |
3288 | |
3289 | rcu_read_lock(); |
3290 | vpd_pg83 = rcu_dereference(sdev->vpd_pg83); |
3291 | if (!vpd_pg83) { |
3292 | rcu_read_unlock(); |
3293 | return -ENXIO; |
3294 | } |
3295 | |
3296 | d = vpd_pg83->data + 4; |
3297 | while (d < vpd_pg83->data + vpd_pg83->len) { |
3298 | switch (d[1] & 0xf) { |
3299 | case 0x4: |
3300 | /* Relative target port */ |
			rel_port = get_unaligned_be16(&d[6]);
3302 | break; |
3303 | case 0x5: |
3304 | /* Target port group */ |
			group_id = get_unaligned_be16(&d[6]);
3306 | break; |
3307 | default: |
3308 | break; |
3309 | } |
3310 | d += d[3] + 4; |
3311 | } |
3312 | rcu_read_unlock(); |
3313 | |
3314 | if (group_id >= 0 && rel_id && rel_port != -1) |
3315 | *rel_id = rel_port; |
3316 | |
3317 | return group_id; |
3318 | } |
3319 | EXPORT_SYMBOL(scsi_vpd_tpg_id); |
3320 | |
3321 | /** |
3322 | * scsi_build_sense - build sense data for a command |
3323 | * @scmd: scsi command for which the sense should be formatted |
3324 | * @desc: Sense format (non-zero == descriptor format, |
3325 | * 0 == fixed format) |
3326 | * @key: Sense key |
3327 | * @asc: Additional sense code |
3328 | * @ascq: Additional sense code qualifier |
3329 | * |
3330 | **/ |
3331 | void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq) |
3332 | { |
	scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
3334 | scmd->result = SAM_STAT_CHECK_CONDITION; |
3335 | } |
3336 | EXPORT_SYMBOL_GPL(scsi_build_sense); |
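
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * rejecting an unsupported CDB can fill in fixed-format sense data for
 * ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE (asc 0x20, ascq 0x00)
 * before completing the command.  foo_reject_opcode() is hypothetical.
 */
#if 0
static void foo_reject_opcode(struct scsi_cmnd *cmd)
{
	scsi_build_sense(cmd, 0 /* fixed format */, ILLEGAL_REQUEST, 0x20,
			 0x00);
	scsi_done(cmd);
}
#endif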
3337 | |