/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;


/*
 * The various choices mean:
 * NONE: Self evident.  Host adapter is not capable of scatter-gather.
 * ALL:  Means that the host adapter module can do scatter-gather,
 *	 and that there is no limit to the size of the table to which
 *	 we scatter/gather data.  The value we set here is the maximum
 *	 single element sglist.  To use chained sglists, the adapter
 *	 has to set a value beyond ALL (and correctly use the chain
 *	 handling API).
 * Anything else: Indicates the maximum number of chains that can be
 *	 used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL	SG_CHUNK_SIZE

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02
struct scsi_host_template {
	struct module *module;
	const char *name;

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(* info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
		     void __user *arg);

#ifdef CONFIG_COMPAT
	/*
	 * Compat handler. Handle 32bit ABI.
	 * When unknown ioctl is passed return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg);
#endif

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command, the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the HBA has accepted the
	 * command.  The done() function must be called on the command
	 * when the driver has finished with it.  (You may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand.)
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
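
	/*
	 * An illustrative sketch only (the ex_* names are hypothetical and
	 * not part of this interface): a queuecommand implementation that
	 * either accepts the command or pushes back on the midlayer.  The
	 * hypothetical hardware's completion path is assumed to invoke
	 * cmd->scsi_done(cmd) once the command finishes:
	 *
	 *	static int ex_queuecommand(struct Scsi_Host *shost,
	 *				   struct scsi_cmnd *cmd)
	 *	{
	 *		struct ex_adapter *adap = shost_priv(shost);
	 *
	 *		if (ex_hw_ring_full(adap))
	 *			return SCSI_MLQUEUE_HOST_BUSY;
	 *
	 *		ex_hw_submit(adap, cmd);
	 *		return 0;
	 *	}
	 */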

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine.  When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED	(at least one of them)
	 */
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_target_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);
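
	/*
	 * Sketch of an abort handler (hypothetical ex_* names; SUCCESS and
	 * FAILED are the midlayer's eh return codes).  On FAILED the
	 * midlayer escalates to the stronger reset handlers:
	 *
	 *	static int ex_eh_abort_handler(struct scsi_cmnd *cmd)
	 *	{
	 *		struct ex_adapter *adap = shost_priv(cmd->device->host);
	 *
	 *		if (ex_hw_abort(adap, cmd) != 0)
	 *			return FAILED;
	 *		return SUCCESS;
	 *	}
	 */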

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation: If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1. Setting the device queue depth.  Proper setting of this is
	 *    described in the comments for scsi_change_queue_depth.
	 * 2. Determining if the device supports the various synchronous
	 *    negotiation protocols.  The device struct will already have
	 *    responded to INQUIRY and the results of the standard items
	 *    will have been shoved into the various device flag bits, eg.
	 *    device->sdtr will be true if the device supports SDTR messages.
	 * 3. Allocating command structs that the device will need.
	 * 4. Setting the default timeout on this device (if needed).
	 * 5. Anything else the low level driver might want to do on a device
	 *    specific setup basis...
	 * 6. Return 0 on success, non-0 on error.  The device will be marked
	 *    as offline on error so that no access will occur.  If you return
	 *    non-0, your slave_destroy routine will never get called for this
	 *    device, so don't leave any loose memory hanging around, clean
	 *    up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);
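
	/*
	 * Sketch of the allocate/free pairing described above (ex_* names
	 * hypothetical): per-LUN state hung off sdev->hostdata in
	 * slave_alloc() must be released in slave_destroy():
	 *
	 *	static int ex_slave_alloc(struct scsi_device *sdev)
	 *	{
	 *		sdev->hostdata = kzalloc(sizeof(struct ex_lun),
	 *					 GFP_KERNEL);
	 *		return sdev->hostdata ? 0 : -ENOMEM;
	 *	}
	 *
	 *	static void ex_slave_destroy(struct scsi_device *sdev)
	 *	{
	 *		kfree(sdev->hostdata);
	 *		sdev->hostdata = NULL;
	 *	}
	 */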

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host().  This function will be called periodically
	 * until it returns 1 with the scsi_host and the elapsed time of
	 * the scan in jiffies.
	 *
	 * Status: OPTIONAL
	 */
	int (* scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has set up ready for the scan, it can fill
	 * in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (* scan_start)(struct Scsi_Host *);
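
	/*
	 * A sketch of the asynchronous-scan pairing (hypothetical ex_*
	 * helpers): scan_start() kicks off discovery, and scan_finished()
	 * is polled with the elapsed time until it returns non-zero:
	 *
	 *	static void ex_scan_start(struct Scsi_Host *shost)
	 *	{
	 *		ex_hw_start_discovery(shost_priv(shost));
	 *	}
	 *
	 *	static int ex_scan_finished(struct Scsi_Host *shost,
	 *				    unsigned long elapsed)
	 *	{
	 *		return ex_hw_discovery_done(shost_priv(shost)) ||
	 *		       elapsed >= 10 * HZ;
	 *	}
	 */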

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_depth)(struct scsi_device *, int);
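
	/*
	 * Sketch honouring the contract above (EX_MAX_QUEUE_DEPTH is a
	 * hypothetical hardware limit): illegal depths are clamped to the
	 * closest legal value, and the depth actually set is returned via
	 * scsi_change_queue_depth():
	 *
	 *	static int ex_change_queue_depth(struct scsi_device *sdev,
	 *					 int depth)
	 *	{
	 *		if (depth > EX_MAX_QUEUE_DEPTH)
	 *			depth = EX_MAX_QUEUE_DEPTH;
	 *		return scsi_change_queue_depth(sdev, depth);
	 *	}
	 */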

	/*
	 * This function lets the driver expose the queue mapping
	 * to the block layer.
	 *
	 * Status: OPTIONAL
	 */
	int (* map_queues)(struct Scsi_Host *shost);

	/*
	 * This function determines the BIOS parameters for a given
	 * hard disk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			   sector_t, int []);

	/*
	 * This function is called when one or more partitions on the
	 * device reach beyond the end of the device.
	 *
	 * Status: OPTIONAL
	 */
	void (*unlock_native_capacity)(struct scsi_device *);

	/*
	 * Can be used to export driver statistics and other infos to the
	 * world outside the kernel ie. userspace, and it also provides an
	 * interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*show_info)(struct seq_file *, struct Scsi_Host *);
	int (*write_info)(struct Scsi_Host *, char *, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires. The return value tells the
	 * timer routine how to finish the io timeout handling.
	 *
	 * Status: OPTIONAL
	 */
	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

	/*
	 * This is an optional routine that allows the transport to initiate
	 * an LLD adapter or firmware reset using a sysfs attribute.
	 *
	 * Return values: 0 on success, -ve value on failure.
	 *
	 * Status: OPTIONAL
	 */
	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET 1
#define SCSI_FIRMWARE_RESET 2

	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * show_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a given host adapter will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;
	unsigned short sg_prot_tablesize;

	/*
	 * Set this if the host adapter has limitations besides segment count.
	 */
	unsigned int max_sectors;

	/*
	 * Maximum size in bytes of a single segment.
	 */
	unsigned int max_segment_size;

	/*
	 * DMA scatter gather segment boundary limit. A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains a counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/* If the block layer manages tags, this is the tag allocation policy */
	int tag_alloc_policy;

	/*
	 * Track QUEUE_FULL events and reduce queue depth on demand.
	 */
	unsigned track_queue_depth:1;

	/*
	 * This specifies the mode that a LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True if this host adapter uses unchecked DMA onto an ISA bus.
	 */
	unsigned unchecked_isa_dma:1;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/* True if the controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the low-level driver supports blk-mq only */
	unsigned force_blk_mq:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the sysfs class properties for this host, NULL terminated.
	 */
	struct device_attribute **shost_attrs;

	/*
	 * Pointer to the SCSI device properties for this host, NULL terminated.
	 */
	struct device_attribute **sdev_attrs;

	/*
	 * Pointer to the SCSI device attribute groups for this host,
	 * NULL terminated.
	 */
	const struct attribute_group **sdev_groups;

	/*
	 * Vendor Identifier associated with the host
	 *
	 * Note: When specifying vendor_id, be sure to read the
	 *   Vendor Type and ID formatting requirements specified in
	 *   scsi_netlink.h
	 */
	u64 vendor_id;

	/*
	 * Additional per-command data allocated for the driver.
	 */
	unsigned int cmd_size;
	struct scsi_host_cmd_pool *cmd_pool;
};
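
/*
 * A minimal template for a hypothetical driver might look like the
 * sketch below (the ex_* symbols are illustrative placeholders, not
 * part of this interface):
 *
 *	static struct scsi_host_template ex_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "ex_hba",
 *		.proc_name		= "ex_hba",
 *		.queuecommand		= ex_queuecommand,
 *		.eh_host_reset_handler	= ex_eh_host_reset_handler,
 *		.can_queue		= 64,
 *		.this_id		= -1,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *		.cmd_per_lun		= 2,
 *	};
 */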

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		rc = func_name##_lck (cmd, cmd->scsi_done);		\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}
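
/*
 * Typical use (a sketch; the ex_* names are hypothetical): the driver
 * implements the old-style locked entry point with an _lck suffix and
 * lets the macro generate the unlocked wrapper that the template points
 * to:
 *
 *	static int ex_queuecommand_lck(struct scsi_cmnd *cmd,
 *				       void (*done)(struct scsi_cmnd *))
 *	{
 *		... issue cmd to the hardware under shost->host_lock ...
 *		return 0;
 *	}
 *
 *	static DEF_SCSI_QCMD(ex_queuecommand)
 */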

/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held. NEVER
	 * access this list directly from a driver.
	 */
	struct list_head __devices;
	struct list_head __targets;

	struct list_head starved_list;

	spinlock_t default_lock;
	spinlock_t *host_lock;

	struct mutex scan_mutex;	/* serialize scanning activity */

	struct list_head eh_cmd_q;
	struct task_struct *ehandler;	/* Error recovery thread. */
	struct completion *eh_action;	/* Wait for specific actions on the
					   host. */
	wait_queue_head_t host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/* Area to keep a shared tag map */
	struct blk_mq_tag_set tag_set;

	atomic_t host_busy;		/* commands actually active on low-level */
	atomic_t host_blocked;

	unsigned int host_failed;	/* commands that failed.
					   protected by host_lock */
	unsigned int host_eh_scheduled;	/* EH scheduled without command */

	unsigned int host_no;	/* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;

	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others,
	 * or 260 if the driver supports variable length cdbs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned long dma_boundary;

	/*
	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
	 *
	 * Note: it is assumed that each hardware queue has a queue depth of
	 * can_queue. In other words, the total queue depth per host
	 * is nr_hw_queues * can_queue.
	 */
	unsigned nr_hw_queues;
	unsigned active_mode:2;
	unsigned unchecked_isa_dma:1;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;)
	 */
	unsigned reverse_ordering:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	unsigned use_cmd_list:1;

	/* Host responded with short (<36 bytes) INQUIRY result */
	unsigned short_inquiry:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[20];
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;
	unsigned char prot_guard_type;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int irq;

	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device shost_gendev, shost_dev;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[0]	/* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};

#define class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}
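
/*
 * hostdata[] is carved out at the end of the Scsi_Host allocation; the
 * privsize argument of scsi_host_alloc() determines how much space it
 * provides.  A sketch (struct ex_adapter is a hypothetical driver
 * structure):
 *
 *	shost = scsi_host_alloc(&ex_template, sizeof(struct ex_adapter));
 *	adap = shost_priv(shost);	(points into shost->hostdata[])
 */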

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
		shost->shost_state == SHOST_CANCEL_RECOVERY ||
		shost->shost_state == SHOST_DEL_RECOVERY ||
		shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
					       struct device *,
					       struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
					     struct device *dev)
{
	return scsi_add_host_with_dma(host, dev, dev);
}
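
/*
 * Typical registration sequence in a hypothetical PCI driver's probe
 * routine (a sketch only; error handling omitted, ex_/pdev names are
 * placeholders):
 *
 *	shost = scsi_host_alloc(&ex_template, sizeof(struct ex_adapter));
 *	shost->max_id = 16;
 *	err = scsi_add_host(shost, &pdev->dev);
 *	scsi_scan_host(shost);
 *
 * and the matching teardown in remove():
 *
 *	scsi_remove_host(shost);
 *	scsi_host_put(shost);
 */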

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:	Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING ||
	       shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This physical pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
	shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
	static unsigned char cap[] = { 0,
				       SHOST_DIF_TYPE1_PROTECTION,
				       SHOST_DIF_TYPE2_PROTECTION,
				       SHOST_DIF_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
				       SHOST_DIX_TYPE1_PROTECTION,
				       SHOST_DIX_TYPE2_PROTECTION,
				       SHOST_DIX_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type];
#endif
	return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
	SHOST_DIX_GUARD_CRC = 1 << 0,
	SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
	shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
	return shost->prot_guard_type;
}
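
/*
 * Sketch of how a DIF/DIX-capable driver might advertise its abilities
 * during host setup (the particular capability mix here is illustrative
 * only):
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */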

extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */