// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
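
/*
 * Usage sketch (illustrative invocation; the parameter names correspond
 * to the DEF_* defaults further below): with the default module
 * parameters a single host with one target and one LUN is created, e.g.
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=2
 * would instead give one host with two targets of two LUNs each, backed
 * by a 256 MiB ram store.
 */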


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE 1
#define SDEBUG_OPT_MEDIUM_ERR 2
#define SDEBUG_OPT_TIMEOUT 4
#define SDEBUG_OPT_RECOVERED_ERR 8
#define SDEBUG_OPT_TRANSPORT_ERR 16
#define SDEBUG_OPT_DIF_ERR 32
#define SDEBUG_OPT_DIX_ERR 64
#define SDEBUG_OPT_MAC_TIMEOUT 128
#define SDEBUG_OPT_SHORT_TRANSFER 0x100
#define SDEBUG_OPT_Q_NOISE 0x200
#define SDEBUG_OPT_ALL_TSF 0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF 0x800
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_CMD_ABORT 0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
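
/*
 * Worked example, assuming a 64-bit build: BITS_PER_LONG is 64, so
 * SDEBUG_CANQUEUE = 3 * 64 = 192 commands may be queued per submit
 * queue, and DEF_CMD_PER_LUN starts at that same value.
 */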

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN 1		/* Data-in command (e.g. READ) */
#define F_D_OUT 2		/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE 4		/* WRITE SAME, NDOB bit */
#define F_D_UNKN 8
#define F_RL_WLUN_OK 0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA 0x20		/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR 0x40	/* for commands like INQUIRY */
#define F_SA_LOW 0x80		/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH 0x100		/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP 0x200		/* invalid opcode (not supported) */
#define F_FAKE_RW 0x400		/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS 0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY 0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY 0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};

struct sdebug_err_inject {
	int type;
	struct list_head list;
	int cnt;
	unsigned char cmd;
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
};

struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;
	struct scsi_cmnd *scmd;
};

struct sdebug_scsi_cmd {
	spinlock_t lock;
};

static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	    SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
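
/*
 * Lookup sketch: a READ(10) cdb has cdb[0] == 0x28, so
 * opcode_ind_arr[0x28] yields SDEB_I_READ; opcode_info_arr[SDEB_I_READ]
 * below holds the preferred READ(16) entry, and its read_iarr[] overflow
 * array carries the READ(10), READ(6) and READ(12) variants.
 */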

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
		0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;	/* io_uring iopoll interface */

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;

static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}

static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}

static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
			break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
			break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
			break;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}

static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL); /* +1 keeps the sscanf input NUL terminated */
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
		break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
		break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
		break;

	default:
		goto out_error;
	}

	kfree(buf);
	sdebug_err_add(sdev, inject);

	return count;

out_error:
	kfree(buf);
	kfree(inject);
	return -EINVAL;
}
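
/*
 * Example writes to this per-device "error" file, matching the sscanf
 * formats parsed above (values illustrative; 0x12 is INQUIRY):
 *   echo "0 1 0x12" > error	injects one timeout for INQUIRY (type 0)
 *   echo "- 0 0x12" > error	removes that injection again
 * ERR_FAIL_QUEUE_CMD (type 1) additionally takes the queuecommand return
 * value, and ERR_FAIL_CMD (type 2) the six result bytes listed above.
 */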

static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};

static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip) {
		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
		return ret < 0 ? ret : count;
	}
	return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
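
/*
 * Illustrative use of the per-target "fail_reset" file created below
 * (the exact debugfs path depends on where sdebug_debugfs_root and the
 * per-target directory are created):
 *   echo 1 > /sys/kernel/debug/scsi_debug/<target>/fail_reset
 * sets reset_fail so a subsequent target reset can be made to fail.
 */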

static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;
	struct dentry *dentry;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);
	if (IS_ERR_OR_NULL(targetip->debugfs_entry))
		pr_info("%s: failed to create debugfs directory for target %s\n",
			__func__, dev_name(&starget->dev));

	dentry = debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry,
				starget, &sdebug_target_reset_fail_fops);
	if (IS_ERR_OR_NULL(dentry))
		pr_info("%s: failed to create fail_reset file for target %s\n",
			__func__, dev_name(&starget->dev));

	starget->hostdata = targetip;

	return 0;
}

static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule(sdebug_target_cleanup_async, targetip);
	}
}

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
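
/*
 * Wrap-around example: the ram store may be smaller than the reported
 * capacity, so LBAs are folded modulo sdebug_store_sectors above. With
 * an 8 MiB store of 512-byte sectors (16384 sectors), virtual LBA 20000
 * maps to 20000 % 16384 = 3616 within the store.
 */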

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
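
/*
 * SKS example: for c_d == SDEB_IN_CDB, in_byte = 1 and in_bit = 2 the
 * three sense-key specific bytes built above become 0xca 0x00 0x01
 * (0x80 valid | 0x40 C/D | 0x08 BPV | bit 2, then byte pointer 0x0001).
 */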

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
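
/*
 * Resid example: with scsi_bufflen() == 512, off_dst == 100 and 200
 * bytes actually copied, n = 512 - (100 + 200) = 212, so the resid is
 * clamped down to at most 212 while a smaller earlier value is kept.
 */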
1510
1511/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1512 * 'arr' or -1 if error.
1513 */
1514static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1515 int arr_len)
1516{
1517 if (!scsi_bufflen(cmd: scp))
1518 return 0;
1519 if (scp->sc_data_direction != DMA_TO_DEVICE)
1520 return -1;
1521
1522 return scsi_sg_copy_to_buffer(cmd: scp, buf: arr, buflen: arr_len);
1523}
1524
1525
1526static char sdebug_inq_vendor_id[9] = "Linux ";
1527static char sdebug_inq_product_id[17] = "scsi_debug ";
1528static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1529/* Use some locally assigned NAAs for SAS addresses. */
1530static const u64 naa3_comp_a = 0x3222222000000000ULL;
1531static const u64 naa3_comp_b = 0x3333333000000000ULL;
1532static const u64 naa3_comp_c = 0x3111111000000000ULL;
1533
1534/* Device identification VPD page. Returns number of bytes placed in arr */
1535static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1536 int target_dev_id, int dev_id_num,
1537 const char *dev_id_str, int dev_id_str_len,
1538 const uuid_t *lu_name)
1539{
1540 int num, port_a;
1541 char b[32];
1542
1543 port_a = target_dev_id + 1;
1544 /* T10 vendor identifier field format (faked) */
1545 arr[0] = 0x2; /* ASCII */
1546 arr[1] = 0x1;
1547 arr[2] = 0x0;
1548 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1549 memcpy(&arr[12], sdebug_inq_product_id, 16);
1550 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1551 num = 8 + 16 + dev_id_str_len;
1552 arr[3] = num;
1553 num += 4;
1554 if (dev_id_num >= 0) {
1555 if (sdebug_uuid_ctl) {
1556 /* Locally assigned UUID */
1557 arr[num++] = 0x1; /* binary (not necessarily sas) */
1558 arr[num++] = 0xa; /* PIV=0, lu, naa */
1559 arr[num++] = 0x0;
1560 arr[num++] = 0x12;
1561 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1562 arr[num++] = 0x0;
1563 memcpy(arr + num, lu_name, 16);
1564 num += 16;
1565 } else {
1566 /* NAA-3, Logical unit identifier (binary) */
1567 arr[num++] = 0x1; /* binary (not necessarily sas) */
1568 arr[num++] = 0x3; /* PIV=0, lu, naa */
1569 arr[num++] = 0x0;
1570 arr[num++] = 0x8;
1571 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1572 num += 8;
1573 }
1574 /* Target relative port number */
1575 arr[num++] = 0x61; /* proto=sas, binary */
1576 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1577 arr[num++] = 0x0; /* reserved */
1578 arr[num++] = 0x4; /* length */
1579 arr[num++] = 0x0; /* reserved */
1580 arr[num++] = 0x0; /* reserved */
1581 arr[num++] = 0x0;
1582 arr[num++] = 0x1; /* relative port A */
1583 }
1584 /* NAA-3, Target port identifier */
1585 arr[num++] = 0x61; /* proto=sas, binary */
1586 arr[num++] = 0x93; /* piv=1, target port, naa */
1587 arr[num++] = 0x0;
1588 arr[num++] = 0x8;
1589 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1590 num += 8;
1591 /* NAA-3, Target port group identifier */
1592 arr[num++] = 0x61; /* proto=sas, binary */
1593 arr[num++] = 0x95; /* piv=1, target port group id */
1594 arr[num++] = 0x0;
1595 arr[num++] = 0x4;
1596 arr[num++] = 0;
1597 arr[num++] = 0;
1598 put_unaligned_be16(port_group_id, arr + num);
1599 num += 2;
1600 /* NAA-3, Target device identifier */
1601 arr[num++] = 0x61; /* proto=sas, binary */
1602 arr[num++] = 0xa3; /* piv=1, target device, naa */
1603 arr[num++] = 0x0;
1604 arr[num++] = 0x8;
1605 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1606 num += 8;
1607 /* SCSI name string: Target device identifier */
1608 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1609 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1610 arr[num++] = 0x0;
1611 arr[num++] = 24;
1612 memcpy(arr + num, "naa.32222220", 12);
1613 num += 12;
1614 snprintf(b, sizeof(b), "%08X", target_dev_id);
1615 memcpy(arr + num, b, 8);
1616 num += 8;
1617 memset(arr + num, 0, 4);
1618 num += 4;
1619 return num;
1620}
1621
1622static unsigned char vpd84_data[] = {
1623/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1624 0x22,0x22,0x22,0x0,0xbb,0x1,
1625 0x22,0x22,0x22,0x0,0xbb,0x2,
1626};
1627
1628/* Software interface identification VPD page */
1629static int inquiry_vpd_84(unsigned char *arr)
1630{
1631 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1632 return sizeof(vpd84_data);
1633}
1634
1635/* Management network addresses VPD page */
1636static int inquiry_vpd_85(unsigned char *arr)
1637{
1638 int num = 0;
1639 const char *na1 = "https://www.kernel.org/config";
1640 const char *na2 = "http://www.kernel.org/log";
1641 int plen, olen;
1642
1643 arr[num++] = 0x1; /* lu, storage config */
1644 arr[num++] = 0x0; /* reserved */
1645 arr[num++] = 0x0;
1646 olen = strlen(na1);
1647 plen = olen + 1;
1648 if (plen % 4)
1649 plen = ((plen / 4) + 1) * 4;
1650 arr[num++] = plen; /* length, null terminated, padded */
1651 memcpy(arr + num, na1, olen);
1652 memset(arr + num + olen, 0, plen - olen);
1653 num += plen;
1654
1655 arr[num++] = 0x4; /* lu, logging */
1656 arr[num++] = 0x0; /* reserved */
1657 arr[num++] = 0x0;
1658 olen = strlen(na2);
1659 plen = olen + 1;
1660 if (plen % 4)
1661 plen = ((plen / 4) + 1) * 4;
1662 arr[num++] = plen; /* length, null terminated, padded */
1663 memcpy(arr + num, na2, olen);
1664 memset(arr + num + olen, 0, plen - olen);
1665 num += plen;
1666
1667 return num;
1668}
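
/*
 * Padding example: na1 above is 29 characters long, so plen starts at
 * 30 (to cover the NUL) and is rounded up to the next multiple of
 * four, giving 32; the memset() then writes the terminator plus two
 * pad bytes.
 */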
1669
1670/* SCSI ports VPD page */
1671static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1672{
1673 int num = 0;
1674 int port_a, port_b;
1675
1676 port_a = target_dev_id + 1;
1677 port_b = port_a + 1;
1678 arr[num++] = 0x0; /* reserved */
1679 arr[num++] = 0x0; /* reserved */
1680 arr[num++] = 0x0;
1681 arr[num++] = 0x1; /* relative port 1 (primary) */
1682 memset(arr + num, 0, 6);
1683 num += 6;
1684 arr[num++] = 0x0;
1685 arr[num++] = 12; /* length tp descriptor */
1686 /* naa-5 target port identifier (A) */
1687 arr[num++] = 0x61; /* proto=sas, binary */
1688 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1689 arr[num++] = 0x0; /* reserved */
1690 arr[num++] = 0x8; /* length */
1691 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1692 num += 8;
1693 arr[num++] = 0x0; /* reserved */
1694 arr[num++] = 0x0; /* reserved */
1695 arr[num++] = 0x0;
1696 arr[num++] = 0x2; /* relative port 2 (secondary) */
1697 memset(arr + num, 0, 6);
1698 num += 6;
1699 arr[num++] = 0x0;
1700 arr[num++] = 12; /* length tp descriptor */
1701 /* naa-5 target port identifier (B) */
1702 arr[num++] = 0x61; /* proto=sas, binary */
1703 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1704 arr[num++] = 0x0; /* reserved */
1705 arr[num++] = 0x8; /* length */
1706 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1707 num += 8;
1708
1709 return num;
1710}
1711
1712
1713static unsigned char vpd89_data[] = {
1714/* from 4th byte */ 0,0,0,0,
1715'l','i','n','u','x',' ',' ',' ',
1716'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1717'1','2','3','4',
17180x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
17190xec,0,0,0,
17200x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
17210,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
17220x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
17230x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
17240x53,0x41,
17250x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
17260x20,0x20,
17270x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
17280x10,0x80,
17290,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
17300x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
17310x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
17320,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
17330x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
17340x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
17350,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
17360,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17370,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17380,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17390x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
17400,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
17410xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
17420,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
17430,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17440,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17450,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17460,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17470,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17480,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17490,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17500,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17510,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17520,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17530,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17540,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1755};
1756
1757/* ATA Information VPD page */
1758static int inquiry_vpd_89(unsigned char *arr)
1759{
1760 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1761 return sizeof(vpd89_data);
1762}
1763
1764
1765static unsigned char vpdb0_data[] = {
1766 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1767 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1768 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1769 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1770};
1771
1772/* Block limits VPD page (SBC-3) */
1773static int inquiry_vpd_b0(unsigned char *arr)
1774{
1775 unsigned int gran;
1776
1777 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1778
1779 /* Optimal transfer length granularity */
1780 if (sdebug_opt_xferlen_exp != 0 &&
1781 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1782 gran = 1 << sdebug_opt_xferlen_exp;
1783 else
1784 gran = 1 << sdebug_physblk_exp;
1785 put_unaligned_be16(gran, arr + 2);
1786
1787 /* Maximum Transfer Length */
1788 if (sdebug_store_sectors > 0x400)
1789 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1790
1791 /* Optimal Transfer Length */
1792 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1793
1794 if (sdebug_lbpu) {
1795 /* Maximum Unmap LBA Count */
1796 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1797
1798 /* Maximum Unmap Block Descriptor Count */
1799 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1800 }
1801
1802 /* Unmap Granularity Alignment */
1803 if (sdebug_unmap_alignment) {
1804 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1805 arr[28] |= 0x80; /* UGAVALID */
1806 }
1807
1808 /* Optimal Unmap Granularity */
1809 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1810
1811 /* Maximum WRITE SAME Length */
1812 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1813
1814 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1815}
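
/*
 * Granularity example (illustrative module settings): with
 * sdebug_physblk_exp=3 and sdebug_opt_xferlen_exp=0 the else branch
 * applies and gran = 1 << 3 = 8 logical blocks; raising
 * sdebug_opt_xferlen_exp to 4 makes the first branch report
 * 1 << 4 = 16 instead.
 */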
1816
1817/* Block device characteristics VPD page (SBC-3) */
1818static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1819{
1820 memset(arr, 0, 0x3c);
1821 arr[0] = 0;
1822 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1823 arr[2] = 0;
1824 arr[3] = 5; /* less than 1.8" */
1825 if (devip->zmodel == BLK_ZONED_HA)
1826 arr[4] = 1 << 4; /* zoned field = 01b */
1827
1828 return 0x3c;
1829}
1830
1831/* Logical block provisioning VPD page (SBC-4) */
1832static int inquiry_vpd_b2(unsigned char *arr)
1833{
1834 memset(arr, 0, 0x4);
1835 arr[0] = 0; /* threshold exponent */
1836 if (sdebug_lbpu)
1837 arr[1] = 1 << 7;
1838 if (sdebug_lbpws)
1839 arr[1] |= 1 << 6;
1840 if (sdebug_lbpws10)
1841 arr[1] |= 1 << 5;
1842 if (sdebug_lbprz && scsi_debug_lbp())
1843 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1844 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1845 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1846 /* threshold_percentage=0 */
1847 return 0x4;
1848}
1849
1850/* Zoned block device characteristics VPD page (ZBC mandatory) */
1851static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1852{
1853 memset(arr, 0, 0x3c);
1854 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1855 /*
1856 * Set Optimal number of open sequential write preferred zones and
1857 * Optimal number of non-sequentially written sequential write
1858 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1859 * fields set to zero, apart from Max. number of open swrz_s field.
1860 */
1861 put_unaligned_be32(0xffffffff, &arr[4]);
1862 put_unaligned_be32(0xffffffff, &arr[8]);
1863 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1864 put_unaligned_be32(devip->max_open, &arr[12]);
1865 else
1866 put_unaligned_be32(0xffffffff, &arr[12]);
1867 if (devip->zcap < devip->zsize) {
1868 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1869 put_unaligned_be64(devip->zsize, &arr[20]);
1870 } else {
1871 arr[19] = 0;
1872 }
1873 return 0x3c;
1874}
1875
1876#define SDEBUG_LONG_INQ_SZ 96
1877#define SDEBUG_MAX_INQ_ARR_SZ 584
1878
1879static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1880{
1881 unsigned char pq_pdt;
1882 unsigned char *arr;
1883 unsigned char *cmd = scp->cmnd;
1884 u32 alloc_len, n;
1885 int ret;
1886 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1887
1888 alloc_len = get_unaligned_be16(cmd + 3);
1889 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1890 if (!arr)
1891 return DID_REQUEUE << 16;
1892 is_disk = (sdebug_ptype == TYPE_DISK);
1893 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1894 is_disk_zbc = (is_disk || is_zbc);
1895 have_wlun = scsi_is_wlun(scp->device->lun);
1896 if (have_wlun)
1897 pq_pdt = TYPE_WLUN; /* present, wlun */
1898 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1899 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1900 else
1901 pq_pdt = (sdebug_ptype & 0x1f);
1902 arr[0] = pq_pdt;
1903 if (0x2 & cmd[1]) { /* CMDDT bit set */
1904 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1905 kfree(arr);
1906 return check_condition_result;
1907 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1908 int lu_id_num, port_group_id, target_dev_id;
1909 u32 len;
1910 char lu_id_str[6];
1911 int host_no = devip->sdbg_host->shost->host_no;
1912
1913 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1914 (devip->channel & 0x7f);
1915 if (sdebug_vpd_use_hostno == 0)
1916 host_no = 0;
1917 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1918 (devip->target * 1000) + devip->lun);
1919 target_dev_id = ((host_no + 1) * 2000) +
1920 (devip->target * 1000) - 3;
1921 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1922 if (0 == cmd[2]) { /* supported vital product data pages */
1923 arr[1] = cmd[2]; /*sanity */
1924 n = 4;
1925 arr[n++] = 0x0; /* this page */
1926 arr[n++] = 0x80; /* unit serial number */
1927 arr[n++] = 0x83; /* device identification */
1928 arr[n++] = 0x84; /* software interface ident. */
1929 arr[n++] = 0x85; /* management network addresses */
1930 arr[n++] = 0x86; /* extended inquiry */
1931 arr[n++] = 0x87; /* mode page policy */
1932 arr[n++] = 0x88; /* SCSI ports */
1933 if (is_disk_zbc) { /* SBC or ZBC */
1934 arr[n++] = 0x89; /* ATA information */
1935 arr[n++] = 0xb0; /* Block limits */
1936 arr[n++] = 0xb1; /* Block characteristics */
1937 if (is_disk)
1938 arr[n++] = 0xb2; /* LB Provisioning */
1939 if (is_zbc)
1940 arr[n++] = 0xb6; /* ZB dev. char. */
1941 }
1942 arr[3] = n - 4; /* number of supported VPD pages */
1943 } else if (0x80 == cmd[2]) { /* unit serial number */
1944 arr[1] = cmd[2]; /*sanity */
1945 arr[3] = len;
1946 memcpy(&arr[4], lu_id_str, len);
1947 } else if (0x83 == cmd[2]) { /* device identification */
1948 arr[1] = cmd[2]; /*sanity */
1949 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1950 target_dev_id, lu_id_num,
1951 lu_id_str, len,
1952 &devip->lu_name);
1953 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1954 arr[1] = cmd[2]; /*sanity */
1955 arr[3] = inquiry_vpd_84(&arr[4]);
1956 } else if (0x85 == cmd[2]) { /* Management network addresses */
1957 arr[1] = cmd[2]; /*sanity */
1958 arr[3] = inquiry_vpd_85(&arr[4]);
1959 } else if (0x86 == cmd[2]) { /* extended inquiry */
1960 arr[1] = cmd[2]; /*sanity */
1961 arr[3] = 0x3c; /* number of following entries */
1962 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1963 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1964 else if (have_dif_prot)
1965 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1966 else
1967 arr[4] = 0x0; /* no protection stuff */
1968 arr[5] = 0x7; /* head of q, ordered + simple q's */
1969 } else if (0x87 == cmd[2]) { /* mode page policy */
1970 arr[1] = cmd[2]; /*sanity */
1971 arr[3] = 0x8; /* number of following entries */
1972 arr[4] = 0x2; /* disconnect-reconnect mp */
1973 arr[6] = 0x80; /* mlus, shared */
1974 arr[8] = 0x18; /* protocol specific lu */
1975 arr[10] = 0x82; /* mlus, per initiator port */
1976 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1977 arr[1] = cmd[2]; /*sanity */
1978 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1979 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1980 arr[1] = cmd[2]; /*sanity */
1981 n = inquiry_vpd_89(&arr[4]);
1982 put_unaligned_be16(n, arr + 2);
1983 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1984 arr[1] = cmd[2]; /*sanity */
1985 arr[3] = inquiry_vpd_b0(&arr[4]);
1986 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1987 arr[1] = cmd[2]; /*sanity */
1988 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1989 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1990 arr[1] = cmd[2]; /*sanity */
1991 arr[3] = inquiry_vpd_b2(&arr[4]);
1992 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1993 arr[1] = cmd[2]; /*sanity */
1994 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1995 } else {
1996 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1997 kfree(arr);
1998 return check_condition_result;
1999 }
2000 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2001 ret = fill_from_dev_buffer(scp, arr,
2002 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2003 kfree(arr);
2004 return ret;
2005 }
2006 /* drops through here for a standard inquiry */
2007 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
2008 arr[2] = sdebug_scsi_level;
2009 arr[3] = 2; /* response_data_format==2 */
2010 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2011 arr[5] = (int)have_dif_prot; /* PROTECT bit */
2012 if (sdebug_vpd_use_hostno == 0)
2013 arr[5] |= 0x10; /* claim: implicit TPGS */
2014 arr[6] = 0x10; /* claim: MultiP */
2015 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2016 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2017 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2018 memcpy(&arr[16], sdebug_inq_product_id, 16);
2019 memcpy(&arr[32], sdebug_inq_product_rev, 4);
2020 /* Use Vendor Specific area to place driver date in ASCII hex */
2021 memcpy(&arr[36], sdebug_version_date, 8);
2022 /* version descriptors (2 bytes each) follow */
2023 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
2024 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
2025 n = 62;
2026 if (is_disk) { /* SBC-4 no version claimed */
2027 put_unaligned_be16(0x600, arr + n);
2028 n += 2;
2029 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
2030 put_unaligned_be16(0x525, arr + n);
2031 n += 2;
2032 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
2033 put_unaligned_be16(0x624, arr + n);
2034 n += 2;
2035 }
2036 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
2037 ret = fill_from_dev_buffer(scp, arr,
2038 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2039 kfree(arr);
2040 return ret;
2041}
2042
2043/* See resp_iec_m_pg() for how this data is manipulated */
2044static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2045 0, 0, 0x0, 0x0};
2046
2047static int resp_requests(struct scsi_cmnd *scp,
2048 struct sdebug_dev_info *devip)
2049{
2050 unsigned char *cmd = scp->cmnd;
2051 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
2052 bool dsense = !!(cmd[1] & 1);
2053 u32 alloc_len = cmd[4];
2054 u32 len = 18;
2055 int stopped_state = atomic_read(&devip->stopped);
2056
2057 memset(arr, 0, sizeof(arr));
2058 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
2059 if (dsense) {
2060 arr[0] = 0x72;
2061 arr[1] = NOT_READY;
2062 arr[2] = LOGICAL_UNIT_NOT_READY;
2063 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2064 len = 8;
2065 } else {
2066 arr[0] = 0x70;
2067 arr[2] = NOT_READY; /* NO_SENSE in sense_key */
2068 arr[7] = 0xa; /* 18 byte sense buffer */
2069 arr[12] = LOGICAL_UNIT_NOT_READY;
2070 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2071 }
2072 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2073 /* Informational exceptions control mode page: TEST=1, MRIE=6 */
2074 if (dsense) {
2075 arr[0] = 0x72;
2076 arr[1] = 0x0; /* NO_SENSE in sense_key */
2077 arr[2] = THRESHOLD_EXCEEDED;
2078 arr[3] = 0xff; /* Failure prediction(false) */
2079 len = 8;
2080 } else {
2081 arr[0] = 0x70;
2082 arr[2] = 0x0; /* NO_SENSE in sense_key */
2083 arr[7] = 0xa; /* 18 byte sense buffer */
2084 arr[12] = THRESHOLD_EXCEEDED;
2085 arr[13] = 0xff; /* Failure prediction(false) */
2086 }
2087 } else { /* nothing to report */
2088 if (dsense) {
2089 len = 8;
2090 memset(arr, 0, len);
2091 arr[0] = 0x72;
2092 } else {
2093 memset(arr, 0, len);
2094 arr[0] = 0x70;
2095 arr[7] = 0xa;
2096 }
2097 }
2098 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2099}
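
/*
 * Layout note: arr[0] of 0x70 selects fixed format sense (18 bytes,
 * sense key at arr[2], ASC/ASCQ at arr[12..13]) while 0x72 selects
 * descriptor format (8 byte header, sense key at arr[1], ASC/ASCQ at
 * arr[2..3]); the DESC bit in cmd[1] picks which layout is built.
 */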
2100
2101static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2102{
2103 unsigned char *cmd = scp->cmnd;
2104 int power_cond, want_stop, stopped_state;
2105 bool changing;
2106
2107 power_cond = (cmd[4] & 0xf0) >> 4;
2108 if (power_cond) {
2109 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2110 return check_condition_result;
2111 }
2112 want_stop = !(cmd[4] & 1);
2113 stopped_state = atomic_read(&devip->stopped);
2114 if (stopped_state == 2) {
2115 ktime_t now_ts = ktime_get_boottime();
2116
2117 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2118 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2119
2120 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2121 /* tur_ms_to_ready timer extinguished */
2122 atomic_set(&devip->stopped, 0);
2123 stopped_state = 0;
2124 }
2125 }
2126 if (stopped_state == 2) {
2127 if (want_stop) {
2128 stopped_state = 1; /* dummy up success */
2129 } else { /* Disallow tur_ms_to_ready delay to be overridden */
2130 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2131 return check_condition_result;
2132 }
2133 }
2134 }
2135 changing = (stopped_state != want_stop);
2136 if (changing)
2137 atomic_xchg(&devip->stopped, want_stop);
2138 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
2139 return SDEG_RES_IMMED_MASK;
2140 else
2141 return 0;
2142}
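
/*
 * Timing sketch (illustrative): with sdeb_tur_ms_to_ready=2500 the
 * device stays in stopped_state 2 until 2500 * 1000000 ns have passed
 * since create_ts; a START STOP UNIT with START=1 issued before that
 * deadline is rejected with an illegal request rather than being
 * allowed to shorten the delay.
 */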
2143
2144static sector_t get_sdebug_capacity(void)
2145{
2146 static const unsigned int gibibyte = 1073741824;
2147
2148 if (sdebug_virtual_gb > 0)
2149 return (sector_t)sdebug_virtual_gb *
2150 (gibibyte / sdebug_sector_size);
2151 else
2152 return sdebug_store_sectors;
2153}
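
/*
 * Example: sdebug_virtual_gb=4 with the default 512 byte sectors
 * yields 4 * (1073741824 / 512) = 8388608 logical blocks, regardless
 * of the (possibly smaller) size of the backing store.
 */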
2154
2155#define SDEBUG_READCAP_ARR_SZ 8
2156static int resp_readcap(struct scsi_cmnd *scp,
2157 struct sdebug_dev_info *devip)
2158{
2159 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2160 unsigned int capac;
2161
2162 /* following just in case virtual_gb changed */
2163 sdebug_capacity = get_sdebug_capacity();
2164 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2165 if (sdebug_capacity < 0xffffffff) {
2166 capac = (unsigned int)sdebug_capacity - 1;
2167 put_unaligned_be32(capac, arr + 0);
2168 } else
2169 put_unaligned_be32(0xffffffff, arr + 0);
2170 put_unaligned_be16(sdebug_sector_size, arr + 6);
2171 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2172}
2173
2174#define SDEBUG_READCAP16_ARR_SZ 32
2175static int resp_readcap16(struct scsi_cmnd *scp,
2176 struct sdebug_dev_info *devip)
2177{
2178 unsigned char *cmd = scp->cmnd;
2179 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2180 u32 alloc_len;
2181
2182 alloc_len = get_unaligned_be32(cmd + 10);
2183 /* following just in case virtual_gb changed */
2184 sdebug_capacity = get_sdebug_capacity();
2185 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2186 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2187 put_unaligned_be32(sdebug_sector_size, arr + 8);
2188 arr[13] = sdebug_physblk_exp & 0xf;
2189 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2190
2191 if (scsi_debug_lbp()) {
2192 arr[14] |= 0x80; /* LBPME */
2193 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2194 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2195 * in the wider field maps to 0 in this field.
2196 */
2197 if (sdebug_lbprz & 1) /* precisely what the draft requires */
2198 arr[14] |= 0x40;
2199 }
2200
2201 /*
2202 * Since the scsi_debug READ CAPACITY implementation always reports the
2203 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2204 */
2205 if (devip->zmodel == BLK_ZONED_HM)
2206 arr[12] |= 1 << 4;
2207
2208 arr[15] = sdebug_lowest_aligned & 0xff;
2209
2210 if (have_dif_prot) {
2211 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2212 arr[12] |= 1; /* PROT_EN */
2213 }
2214
2215 return fill_from_dev_buffer(scp, arr,
2216 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2217}
2218
2219#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2220
2221static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2222 struct sdebug_dev_info *devip)
2223{
2224 unsigned char *cmd = scp->cmnd;
2225 unsigned char *arr;
2226 int host_no = devip->sdbg_host->shost->host_no;
2227 int port_group_a, port_group_b, port_a, port_b;
2228 u32 alen, n, rlen;
2229 int ret;
2230
2231 alen = get_unaligned_be32(cmd + 6);
2232 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2233 if (!arr)
2234 return DID_REQUEUE << 16;
2235 /*
2236 * EVPD page 0x88 states we have two ports, one
2237 * real and a fake port with no device connected.
2238 * So we create two port groups with one port each
2239 * and set the group with port B to unavailable.
2240 */
2241 port_a = 0x1; /* relative port A */
2242 port_b = 0x2; /* relative port B */
2243 port_group_a = (((host_no + 1) & 0x7f) << 8) +
2244 (devip->channel & 0x7f);
2245 port_group_b = (((host_no + 1) & 0x7f) << 8) +
2246 (devip->channel & 0x7f) + 0x80;
2247
2248 /*
2249 * The asymmetric access state is cycled according to the host_id.
2250 */
2251 n = 4;
2252 if (sdebug_vpd_use_hostno == 0) {
2253 arr[n++] = host_no % 3; /* Asymm access state */
2254 arr[n++] = 0x0F; /* claim: all states are supported */
2255 } else {
2256 arr[n++] = 0x0; /* Active/Optimized path */
2257 arr[n++] = 0x01; /* only support active/optimized paths */
2258 }
2259 put_unaligned_be16(port_group_a, arr + n);
2260 n += 2;
2261 arr[n++] = 0; /* Reserved */
2262 arr[n++] = 0; /* Status code */
2263 arr[n++] = 0; /* Vendor unique */
2264 arr[n++] = 0x1; /* One port per group */
2265 arr[n++] = 0; /* Reserved */
2266 arr[n++] = 0; /* Reserved */
2267 put_unaligned_be16(port_a, arr + n);
2268 n += 2;
2269 arr[n++] = 3; /* Port unavailable */
2270 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2271 put_unaligned_be16(port_group_b, arr + n);
2272 n += 2;
2273 arr[n++] = 0; /* Reserved */
2274 arr[n++] = 0; /* Status code */
2275 arr[n++] = 0; /* Vendor unique */
2276 arr[n++] = 0x1; /* One port per group */
2277 arr[n++] = 0; /* Reserved */
2278 arr[n++] = 0; /* Reserved */
2279 put_unaligned_be16(port_b, arr + n);
2280 n += 2;
2281
2282 rlen = n - 4;
2283 put_unaligned_be32(rlen, arr + 0);
2284
2285 /*
2286 * Return the smallest of:
2287 * - the allocated length,
2288 * - the constructed response length, and
2289 * - the maximum array size.
2290 */
2291 rlen = min(alen, n);
2292 ret = fill_from_dev_buffer(scp, arr,
2293 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2294 kfree(arr);
2295 return ret;
2296}
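
/*
 * Identifier example: for host_no 0 and channel 0, port group A is
 * ((0 + 1) & 0x7f) << 8 = 0x100 and port group B is 0x180; group B is
 * always reported unavailable, matching the two-port claim made by
 * VPD page 0x88.
 */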
2297
2298static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2299 struct sdebug_dev_info *devip)
2300{
2301 bool rctd;
2302 u8 reporting_opts, req_opcode, sdeb_i, supp;
2303 u16 req_sa, u;
2304 u32 alloc_len, a_len;
2305 int k, offset, len, errsts, count, bump, na;
2306 const struct opcode_info_t *oip;
2307 const struct opcode_info_t *r_oip;
2308 u8 *arr;
2309 u8 *cmd = scp->cmnd;
2310
2311 rctd = !!(cmd[2] & 0x80);
2312 reporting_opts = cmd[2] & 0x7;
2313 req_opcode = cmd[3];
2314 req_sa = get_unaligned_be16(cmd + 4);
2315 alloc_len = get_unaligned_be32(cmd + 6);
2316 if (alloc_len < 4 || alloc_len > 0xffff) {
2317 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2318 return check_condition_result;
2319 }
2320 if (alloc_len > 8192)
2321 a_len = 8192;
2322 else
2323 a_len = alloc_len;
2324 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2325 if (NULL == arr) {
2326 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2327 INSUFF_RES_ASCQ);
2328 return check_condition_result;
2329 }
2330 switch (reporting_opts) {
2331 case 0: /* all commands */
2332 /* count number of commands */
2333 for (count = 0, oip = opcode_info_arr;
2334 oip->num_attached != 0xff; ++oip) {
2335 if (F_INV_OP & oip->flags)
2336 continue;
2337 count += (oip->num_attached + 1);
2338 }
2339 bump = rctd ? 20 : 8;
2340 put_unaligned_be32(count * bump, arr);
2341 for (offset = 4, oip = opcode_info_arr;
2342 oip->num_attached != 0xff && offset < a_len; ++oip) {
2343 if (F_INV_OP & oip->flags)
2344 continue;
2345 na = oip->num_attached;
2346 arr[offset] = oip->opcode;
2347 put_unaligned_be16(oip->sa, arr + offset + 2);
2348 if (rctd)
2349 arr[offset + 5] |= 0x2;
2350 if (FF_SA & oip->flags)
2351 arr[offset + 5] |= 0x1;
2352 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2353 if (rctd)
2354 put_unaligned_be16(0xa, arr + offset + 8);
2355 r_oip = oip;
2356 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2357 if (F_INV_OP & oip->flags)
2358 continue;
2359 offset += bump;
2360 arr[offset] = oip->opcode;
2361 put_unaligned_be16(oip->sa, arr + offset + 2);
2362 if (rctd)
2363 arr[offset + 5] |= 0x2;
2364 if (FF_SA & oip->flags)
2365 arr[offset + 5] |= 0x1;
2366 put_unaligned_be16(oip->len_mask[0],
2367 arr + offset + 6);
2368 if (rctd)
2369 put_unaligned_be16(0xa,
2370 arr + offset + 8);
2371 }
2372 oip = r_oip;
2373 offset += bump;
2374 }
2375 break;
2376 case 1: /* one command: opcode only */
2377 case 2: /* one command: opcode plus service action */
2378 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2379 sdeb_i = opcode_ind_arr[req_opcode];
2380 oip = &opcode_info_arr[sdeb_i];
2381 if (F_INV_OP & oip->flags) {
2382 supp = 1;
2383 offset = 4;
2384 } else {
2385 if (1 == reporting_opts) {
2386 if (FF_SA & oip->flags) {
2387 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2388 2, 2);
2389 kfree(arr);
2390 return check_condition_result;
2391 }
2392 req_sa = 0;
2393 } else if (2 == reporting_opts &&
2394 0 == (FF_SA & oip->flags)) {
2395 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2396 kfree(arr); /* point at requested sa */
2397 return check_condition_result;
2398 }
2399 if (0 == (FF_SA & oip->flags) &&
2400 req_opcode == oip->opcode)
2401 supp = 3;
2402 else if (0 == (FF_SA & oip->flags)) {
2403 na = oip->num_attached;
2404 for (k = 0, oip = oip->arrp; k < na;
2405 ++k, ++oip) {
2406 if (req_opcode == oip->opcode)
2407 break;
2408 }
2409 supp = (k >= na) ? 1 : 3;
2410 } else if (req_sa != oip->sa) {
2411 na = oip->num_attached;
2412 for (k = 0, oip = oip->arrp; k < na;
2413 ++k, ++oip) {
2414 if (req_sa == oip->sa)
2415 break;
2416 }
2417 supp = (k >= na) ? 1 : 3;
2418 } else
2419 supp = 3;
2420 if (3 == supp) {
2421 u = oip->len_mask[0];
2422 put_unaligned_be16(u, arr + 2);
2423 arr[4] = oip->opcode;
2424 for (k = 1; k < u; ++k)
2425 arr[4 + k] = (k < 16) ?
2426 oip->len_mask[k] : 0xff;
2427 offset = 4 + u;
2428 } else
2429 offset = 4;
2430 }
2431 arr[1] = (rctd ? 0x80 : 0) | supp;
2432 if (rctd) {
2433 put_unaligned_be16(0xa, arr + offset);
2434 offset += 12;
2435 }
2436 break;
2437 default:
2438 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2439 kfree(arr);
2440 return check_condition_result;
2441 }
2442 offset = (offset < a_len) ? offset : a_len;
2443 len = (offset < alloc_len) ? offset : alloc_len;
2444 errsts = fill_from_dev_buffer(scp, arr, len);
2445 kfree(arr);
2446 return errsts;
2447}
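
/*
 * Descriptor sizing: each command descriptor is 8 bytes, or 20 when
 * RCTD is set (8 bytes plus a 12 byte command timeouts descriptor),
 * hence bump = rctd ? 20 : 8 above; the one-command cases likewise
 * advance offset by 12 after writing the 0xa timeouts descriptor
 * length.
 */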
2448
2449static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2450 struct sdebug_dev_info *devip)
2451{
2452 bool repd;
2453 u32 alloc_len, len;
2454 u8 arr[16];
2455 u8 *cmd = scp->cmnd;
2456
2457 memset(arr, 0, sizeof(arr));
2458 repd = !!(cmd[2] & 0x80);
2459 alloc_len = get_unaligned_be32(cmd + 6);
2460 if (alloc_len < 4) {
2461 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2462 return check_condition_result;
2463 }
2464 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2465 arr[1] = 0x1; /* ITNRS */
2466 if (repd) {
2467 arr[3] = 0xc;
2468 len = 16;
2469 } else
2470 len = 4;
2471
2472 len = (len < alloc_len) ? len : alloc_len;
2473 return fill_from_dev_buffer(scp, arr, len);
2474}
2475
2476/* <<Following mode page info copied from ST318451LW>> */
2477
2478static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2479{ /* Read-Write Error Recovery page for mode_sense */
2480 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2481 5, 0, 0xff, 0xff};
2482
2483 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2484 if (1 == pcontrol)
2485 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2486 return sizeof(err_recov_pg);
2487}
2488
2489static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2490{ /* Disconnect-Reconnect page for mode_sense */
2491 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2492 0, 0, 0, 0, 0, 0, 0, 0};
2493
2494 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2495 if (1 == pcontrol)
2496 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2497 return sizeof(disconnect_pg);
2498}
2499
2500static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2501{ /* Format device page for mode_sense */
2502 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2503 0, 0, 0, 0, 0, 0, 0, 0,
2504 0, 0, 0, 0, 0x40, 0, 0, 0};
2505
2506 memcpy(p, format_pg, sizeof(format_pg));
2507 put_unaligned_be16(sdebug_sectors_per, p + 10);
2508 put_unaligned_be16(sdebug_sector_size, p + 12);
2509 if (sdebug_removable)
2510 p[20] |= 0x20; /* should agree with INQUIRY */
2511 if (1 == pcontrol)
2512 memset(p + 2, 0, sizeof(format_pg) - 2);
2513 return sizeof(format_pg);
2514}
2515
2516static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2517 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2518 0, 0, 0, 0};
2519
2520static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2521{ /* Caching page for mode_sense */
2522 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2523 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2524 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2525 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2526
2527 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2528 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2529 memcpy(p, caching_pg, sizeof(caching_pg));
2530 if (1 == pcontrol)
2531 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2532 else if (2 == pcontrol)
2533 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2534 return sizeof(caching_pg);
2535}
2536
2537static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2538 0, 0, 0x2, 0x4b};
2539
2540static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2541{ /* Control mode page for mode_sense */
2542 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2543 0, 0, 0, 0};
2544 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2545 0, 0, 0x2, 0x4b};
2546
2547 if (sdebug_dsense)
2548 ctrl_m_pg[2] |= 0x4;
2549 else
2550 ctrl_m_pg[2] &= ~0x4;
2551
2552 if (sdebug_ato)
2553 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2554
2555 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2556 if (1 == pcontrol)
2557 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2558 else if (2 == pcontrol)
2559 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2560 return sizeof(ctrl_m_pg);
2561}
2562
2563
2564static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2565{ /* Informational Exceptions control mode page for mode_sense */
2566 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2567 0, 0, 0x0, 0x0};
2568 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2569 0, 0, 0x0, 0x0};
2570
2571 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2572 if (1 == pcontrol)
2573 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2574 else if (2 == pcontrol)
2575 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2576 return sizeof(iec_m_pg);
2577}
2578
2579static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2580{ /* SAS SSP mode page - short format for mode_sense */
2581 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2582 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2583
2584 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2585 if (1 == pcontrol)
2586 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2587 return sizeof(sas_sf_m_pg);
2588}
2589
2590
2591static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2592 int target_dev_id)
2593{ /* SAS phy control and discover mode page for mode_sense */
2594 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2595 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2596 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2597 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2598 0x2, 0, 0, 0, 0, 0, 0, 0,
2599 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2600 0, 0, 0, 0, 0, 0, 0, 0,
2601 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2602 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2603 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2604 0x3, 0, 0, 0, 0, 0, 0, 0,
2605 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2606 0, 0, 0, 0, 0, 0, 0, 0,
2607 };
2608 int port_a, port_b;
2609
2610 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2611 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2612 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2613 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2614 port_a = target_dev_id + 1;
2615 port_b = port_a + 1;
2616 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2617 put_unaligned_be32(port_a, p + 20);
2618 put_unaligned_be32(port_b, p + 48 + 20);
2619 if (1 == pcontrol)
2620 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2621 return sizeof(sas_pcd_m_pg);
2622}
2623
2624static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2625{ /* SAS SSP shared protocol specific port mode subpage */
2626 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2627 0, 0, 0, 0, 0, 0, 0, 0,
2628 };
2629
2630 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2631 if (1 == pcontrol)
2632 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2633 return sizeof(sas_sha_m_pg);
2634}
2635
2636#define SDEBUG_MAX_MSENSE_SZ 256
2637
2638static int resp_mode_sense(struct scsi_cmnd *scp,
2639 struct sdebug_dev_info *devip)
2640{
2641 int pcontrol, pcode, subpcode, bd_len;
2642 unsigned char dev_spec;
2643 u32 alloc_len, offset, len;
2644 int target_dev_id;
2645 int target = scp->device->id;
2646 unsigned char *ap;
2647 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2648 unsigned char *cmd = scp->cmnd;
2649 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2650
2651 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2652 pcontrol = (cmd[2] & 0xc0) >> 6;
2653 pcode = cmd[2] & 0x3f;
2654 subpcode = cmd[3];
2655 msense_6 = (MODE_SENSE == cmd[0]);
2656 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2657 is_disk = (sdebug_ptype == TYPE_DISK);
2658 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2659 if ((is_disk || is_zbc) && !dbd)
2660 bd_len = llbaa ? 16 : 8;
2661 else
2662 bd_len = 0;
2663 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2664 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2665 if (0x3 == pcontrol) { /* Saving values not supported */
2666 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2667 return check_condition_result;
2668 }
2669 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2670 (devip->target * 1000) - 3;
2671 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2672 if (is_disk || is_zbc) {
2673 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2674 if (sdebug_wp)
2675 dev_spec |= 0x80;
2676 } else
2677 dev_spec = 0x0;
2678 if (msense_6) {
2679 arr[2] = dev_spec;
2680 arr[3] = bd_len;
2681 offset = 4;
2682 } else {
2683 arr[3] = dev_spec;
2684 if (16 == bd_len)
2685 arr[4] = 0x1; /* set LONGLBA bit */
2686 arr[7] = bd_len; /* assume 255 or less */
2687 offset = 8;
2688 }
2689 ap = arr + offset;
2690 if ((bd_len > 0) && (!sdebug_capacity))
2691 sdebug_capacity = get_sdebug_capacity();
2692
2693 if (8 == bd_len) {
2694 if (sdebug_capacity > 0xfffffffe)
2695 put_unaligned_be32(0xffffffff, ap + 0);
2696 else
2697 put_unaligned_be32(sdebug_capacity, ap + 0);
2698 put_unaligned_be16(sdebug_sector_size, ap + 6);
2699 offset += bd_len;
2700 ap = arr + offset;
2701 } else if (16 == bd_len) {
2702 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2703 put_unaligned_be32(sdebug_sector_size, ap + 12);
2704 offset += bd_len;
2705 ap = arr + offset;
2706 }
2707
2708 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2709 /* TODO: Control Extension page */
2710 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2711 return check_condition_result;
2712 }
2713 bad_pcode = false;
2714
2715 switch (pcode) {
2716 case 0x1: /* Read-Write error recovery page, direct access */
2717 len = resp_err_recov_pg(ap, pcontrol, target);
2718 offset += len;
2719 break;
2720 case 0x2: /* Disconnect-Reconnect page, all devices */
2721 len = resp_disconnect_pg(ap, pcontrol, target);
2722 offset += len;
2723 break;
2724 case 0x3: /* Format device page, direct access */
2725 if (is_disk) {
2726 len = resp_format_pg(ap, pcontrol, target);
2727 offset += len;
2728 } else
2729 bad_pcode = true;
2730 break;
2731 case 0x8: /* Caching page, direct access */
2732 if (is_disk || is_zbc) {
2733 len = resp_caching_pg(ap, pcontrol, target);
2734 offset += len;
2735 } else
2736 bad_pcode = true;
2737 break;
2738 case 0xa: /* Control Mode page, all devices */
2739 len = resp_ctrl_m_pg(ap, pcontrol, target);
2740 offset += len;
2741 break;
2742 case 0x19: /* if spc==1 then sas phy, control+discover */
2743 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2744 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2745 return check_condition_result;
2746 }
2747 len = 0;
2748 if ((0x0 == subpcode) || (0xff == subpcode))
2749 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2750 if ((0x1 == subpcode) || (0xff == subpcode))
2751 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2752 target_dev_id);
2753 if ((0x2 == subpcode) || (0xff == subpcode))
2754 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2755 offset += len;
2756 break;
2757 case 0x1c: /* Informational Exceptions Mode page, all devices */
2758 len = resp_iec_m_pg(ap, pcontrol, target);
2759 offset += len;
2760 break;
2761 case 0x3f: /* Read all Mode pages */
2762 if ((0 == subpcode) || (0xff == subpcode)) {
2763 len = resp_err_recov_pg(ap, pcontrol, target);
2764 len += resp_disconnect_pg(ap + len, pcontrol, target);
2765 if (is_disk) {
2766 len += resp_format_pg(ap + len, pcontrol,
2767 target);
2768 len += resp_caching_pg(ap + len, pcontrol,
2769 target);
2770 } else if (is_zbc) {
2771 len += resp_caching_pg(ap + len, pcontrol,
2772 target);
2773 }
2774 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2775 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2776 if (0xff == subpcode) {
2777 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2778 target, target_dev_id);
2779 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2780 }
2781 len += resp_iec_m_pg(ap + len, pcontrol, target);
2782 offset += len;
2783 } else {
2784 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2785 return check_condition_result;
2786 }
2787 break;
2788 default:
2789 bad_pcode = true;
2790 break;
2791 }
2792 if (bad_pcode) {
2793 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2794 return check_condition_result;
2795 }
2796 if (msense_6)
2797 arr[0] = offset - 1;
2798 else
2799 put_unaligned_be16((offset - 2), arr + 0);
2800 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2801}
2802
2803#define SDEBUG_MAX_MSELECT_SZ 512
2804
2805static int resp_mode_select(struct scsi_cmnd *scp,
2806 struct sdebug_dev_info *devip)
2807{
2808 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2809 int param_len, res, mpage;
2810 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2811 unsigned char *cmd = scp->cmnd;
2812 int mselect6 = (MODE_SELECT == cmd[0]);
2813
2814 memset(arr, 0, sizeof(arr));
2815 pf = cmd[1] & 0x10;
2816 sp = cmd[1] & 0x1;
2817 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2818 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2819 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2820 return check_condition_result;
2821 }
2822 res = fetch_to_dev_buffer(scp, arr, param_len);
2823 if (-1 == res)
2824 return DID_ERROR << 16;
2825 else if (sdebug_verbose && (res < param_len))
2826 sdev_printk(KERN_INFO, scp->device,
2827 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2828 __func__, param_len, res);
2829 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2830 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2831 off = bd_len + (mselect6 ? 4 : 8);
2832 if (md_len > 2 || off >= res) {
2833 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2834 return check_condition_result;
2835 }
2836 mpage = arr[off] & 0x3f;
2837 ps = !!(arr[off] & 0x80);
2838 if (ps) {
2839 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2840 return check_condition_result;
2841 }
2842 spf = !!(arr[off] & 0x40);
2843 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2844 (arr[off + 1] + 2);
2845 if ((pg_len + off) > param_len) {
2846 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2847 PARAMETER_LIST_LENGTH_ERR, 0);
2848 return check_condition_result;
2849 }
2850 switch (mpage) {
2851 case 0x8: /* Caching Mode page */
2852 if (caching_pg[1] == arr[off + 1]) {
2853 memcpy(caching_pg + 2, arr + off + 2,
2854 sizeof(caching_pg) - 2);
2855 goto set_mode_changed_ua;
2856 }
2857 break;
2858 case 0xa: /* Control Mode page */
2859 if (ctrl_m_pg[1] == arr[off + 1]) {
2860 memcpy(ctrl_m_pg + 2, arr + off + 2,
2861 sizeof(ctrl_m_pg) - 2);
2862 if (ctrl_m_pg[4] & 0x8)
2863 sdebug_wp = true;
2864 else
2865 sdebug_wp = false;
2866 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2867 goto set_mode_changed_ua;
2868 }
2869 break;
2870 case 0x1c: /* Informational Exceptions Mode page */
2871 if (iec_m_pg[1] == arr[off + 1]) {
2872 memcpy(iec_m_pg + 2, arr + off + 2,
2873 sizeof(iec_m_pg) - 2);
2874 goto set_mode_changed_ua;
2875 }
2876 break;
2877 default:
2878 break;
2879 }
2880 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2881 return check_condition_result;
2882set_mode_changed_ua:
2883 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2884 return 0;
2885}
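
/*
 * Only the Caching (0x8), Control (0xa) and Informational Exceptions
 * (0x1c) pages are writable here; a successful change latches
 * SDEBUG_UA_MODE_CHANGED so the next command on this device reports a
 * "mode parameters changed" unit attention.
 */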
2886
2887static int resp_temp_l_pg(unsigned char *arr)
2888{
2889 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2890 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2891 };
2892
2893 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2894 return sizeof(temp_l_pg);
2895}
2896
2897static int resp_ie_l_pg(unsigned char *arr)
2898{
2899 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2900 };
2901
2902 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2903 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2904 arr[4] = THRESHOLD_EXCEEDED;
2905 arr[5] = 0xff;
2906 }
2907 return sizeof(ie_l_pg);
2908}
2909
2910static int resp_env_rep_l_spg(unsigned char *arr)
2911{
2912 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2913 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2914 0x1, 0x0, 0x23, 0x8,
2915 0x0, 55, 72, 35, 55, 45, 0, 0,
2916 };
2917
2918 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2919 return sizeof(env_rep_l_spg);
2920}
2921
2922#define SDEBUG_MAX_LSENSE_SZ 512
2923
2924static int resp_log_sense(struct scsi_cmnd *scp,
2925 struct sdebug_dev_info *devip)
2926{
2927 int ppc, sp, pcode, subpcode;
2928 u32 alloc_len, len, n;
2929 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2930 unsigned char *cmd = scp->cmnd;
2931
2932 memset(arr, 0, sizeof(arr));
2933 ppc = cmd[1] & 0x2;
2934 sp = cmd[1] & 0x1;
2935 if (ppc || sp) {
2936 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2937 return check_condition_result;
2938 }
2939 pcode = cmd[2] & 0x3f;
2940 subpcode = cmd[3] & 0xff;
2941 alloc_len = get_unaligned_be16(cmd + 7);
2942 arr[0] = pcode;
2943 if (0 == subpcode) {
2944 switch (pcode) {
2945 case 0x0: /* Supported log pages log page */
2946 n = 4;
2947 arr[n++] = 0x0; /* this page */
2948 arr[n++] = 0xd; /* Temperature */
2949 arr[n++] = 0x2f; /* Informational exceptions */
2950 arr[3] = n - 4;
2951 break;
2952 case 0xd: /* Temperature log page */
2953 arr[3] = resp_temp_l_pg(arr + 4);
2954 break;
2955 case 0x2f: /* Informational exceptions log page */
2956 arr[3] = resp_ie_l_pg(arr + 4);
2957 break;
2958 default:
2959 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2960 return check_condition_result;
2961 }
2962 } else if (0xff == subpcode) {
2963 arr[0] |= 0x40;
2964 arr[1] = subpcode;
2965 switch (pcode) {
2966 case 0x0: /* Supported log pages and subpages log page */
2967 n = 4;
2968 arr[n++] = 0x0;
2969 arr[n++] = 0x0; /* 0,0 page */
2970 arr[n++] = 0x0;
2971 arr[n++] = 0xff; /* this page */
2972 arr[n++] = 0xd;
2973 arr[n++] = 0x0; /* Temperature */
2974 arr[n++] = 0xd;
2975 arr[n++] = 0x1; /* Environment reporting */
2976 arr[n++] = 0xd;
2977 arr[n++] = 0xff; /* all 0xd subpages */
2978 arr[n++] = 0x2f;
2979 arr[n++] = 0x0; /* Informational exceptions */
2980 arr[n++] = 0x2f;
2981 arr[n++] = 0xff; /* all 0x2f subpages */
2982 arr[3] = n - 4;
2983 break;
2984 case 0xd: /* Temperature subpages */
2985 n = 4;
2986 arr[n++] = 0xd;
2987 arr[n++] = 0x0; /* Temperature */
2988 arr[n++] = 0xd;
2989 arr[n++] = 0x1; /* Environment reporting */
2990 arr[n++] = 0xd;
2991 arr[n++] = 0xff; /* these subpages */
2992 arr[3] = n - 4;
2993 break;
2994 case 0x2f: /* Informational exceptions subpages */
2995 n = 4;
2996 arr[n++] = 0x2f;
2997 arr[n++] = 0x0; /* Informational exceptions */
2998 arr[n++] = 0x2f;
2999 arr[n++] = 0xff; /* these subpages */
3000 arr[3] = n - 4;
3001 break;
3002 default:
3003 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3004 return check_condition_result;
3005 }
3006 } else if (subpcode > 0) {
3007 arr[0] |= 0x40;
3008 arr[1] = subpcode;
3009 if (pcode == 0xd && subpcode == 1)
3010 arr[3] = resp_env_rep_l_spg(arr + 4);
3011 else {
3012 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3013 return check_condition_result;
3014 }
3015 } else {
3016 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3017 return check_condition_result;
3018 }
3019 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3020 return fill_from_dev_buffer(scp, arr,
3021 min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
3022}
3023
3024static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3025{
3026 return devip->nr_zones != 0;
3027}
3028
3029static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3030 unsigned long long lba)
3031{
3032 u32 zno = lba >> devip->zsize_shift;
3033 struct sdeb_zone_state *zsp;
3034
3035 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3036 return &devip->zstate[zno];
3037
3038 /*
3039 * If the zone capacity is less than the zone size, adjust for gap
3040 * zones.
3041 */
3042 zno = 2 * zno - devip->nr_conv_zones;
3043 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
3044 zsp = &devip->zstate[zno];
3045 if (lba >= zsp->z_start + zsp->z_size)
3046 zsp++;
3047 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3048 return zsp;
3049}
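
/*
 * Mapping example (illustrative geometry): when zcap < zsize each
 * logical zone is followed by a gap zone in zstate[]. With
 * nr_conv_zones=0 and zsize_shift=16, LBA 0x30000 gives zno=3, which
 * is remapped to index 2*3 - 0 = 6; if the LBA lies beyond that
 * entry's z_start + z_size, the following gap zone (index 7) is
 * returned instead.
 */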
3050
3051static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3052{
3053 return zsp->z_type == ZBC_ZTYPE_CNV;
3054}
3055
3056static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3057{
3058 return zsp->z_type == ZBC_ZTYPE_GAP;
3059}
3060
3061static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3062{
3063 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3064}
3065
3066static void zbc_close_zone(struct sdebug_dev_info *devip,
3067 struct sdeb_zone_state *zsp)
3068{
3069 enum sdebug_z_cond zc;
3070
3071 if (!zbc_zone_is_seq(zsp))
3072 return;
3073
3074 zc = zsp->z_cond;
3075 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3076 return;
3077
3078 if (zc == ZC2_IMPLICIT_OPEN)
3079 devip->nr_imp_open--;
3080 else
3081 devip->nr_exp_open--;
3082
3083 if (zsp->z_wp == zsp->z_start) {
3084 zsp->z_cond = ZC1_EMPTY;
3085 } else {
3086 zsp->z_cond = ZC4_CLOSED;
3087 devip->nr_closed++;
3088 }
3089}
3090
3091static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3092{
3093 struct sdeb_zone_state *zsp = &devip->zstate[0];
3094 unsigned int i;
3095
3096 for (i = 0; i < devip->nr_zones; i++, zsp++) {
3097 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3098 zbc_close_zone(devip, zsp);
3099 return;
3100 }
3101 }
3102}
3103
3104static void zbc_open_zone(struct sdebug_dev_info *devip,
3105 struct sdeb_zone_state *zsp, bool explicit)
3106{
3107 enum sdebug_z_cond zc;
3108
3109 if (!zbc_zone_is_seq(zsp))
3110 return;
3111
3112 zc = zsp->z_cond;
3113 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3114 (!explicit && zc == ZC2_IMPLICIT_OPEN))
3115 return;
3116
3117 /* Close an implicit open zone if necessary */
3118 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3119 zbc_close_zone(devip, zsp);
3120 else if (devip->max_open &&
3121 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3122 zbc_close_imp_open_zone(devip);
3123
3124 if (zsp->z_cond == ZC4_CLOSED)
3125 devip->nr_closed--;
3126 if (explicit) {
3127 zsp->z_cond = ZC3_EXPLICIT_OPEN;
3128 devip->nr_exp_open++;
3129 } else {
3130 zsp->z_cond = ZC2_IMPLICIT_OPEN;
3131 devip->nr_imp_open++;
3132 }
3133}
3134
3135static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3136 struct sdeb_zone_state *zsp)
3137{
3138 switch (zsp->z_cond) {
3139 case ZC2_IMPLICIT_OPEN:
3140 devip->nr_imp_open--;
3141 break;
3142 case ZC3_EXPLICIT_OPEN:
3143 devip->nr_exp_open--;
3144 break;
3145 default:
3146 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3147 zsp->z_start, zsp->z_cond);
3148 break;
3149 }
3150 zsp->z_cond = ZC5_FULL;
3151}
3152
3153static void zbc_inc_wp(struct sdebug_dev_info *devip,
3154 unsigned long long lba, unsigned int num)
3155{
3156 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3157 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3158
3159 if (!zbc_zone_is_seq(zsp))
3160 return;
3161
3162 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3163 zsp->z_wp += num;
3164 if (zsp->z_wp >= zend)
3165 zbc_set_zone_full(devip, zsp);
3166 return;
3167 }
3168
3169 while (num) {
3170 if (lba != zsp->z_wp)
3171 zsp->z_non_seq_resource = true;
3172
3173 end = lba + num;
3174 if (end >= zend) {
3175 n = zend - lba;
3176 zsp->z_wp = zend;
3177 } else if (end > zsp->z_wp) {
3178 n = num;
3179 zsp->z_wp = end;
3180 } else {
3181 n = num;
3182 }
3183 if (zsp->z_wp >= zend)
3184 zbc_set_zone_full(devip, zsp);
3185
3186 num -= n;
3187 lba += n;
3188 if (num) {
3189 zsp++;
3190 zend = zsp->z_start + zsp->z_size;
3191 }
3192 }
3193}
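
/*
 * For sequential-write-required (SWR) zones the write pointer above
 * simply advances by num and the zone goes FULL when it reaches the
 * zone end. The loop handles sequential-write-preferred zones, where
 * a write away from the pointer marks the zone as using a
 * non-sequential resource and a long write may spill the pointer into
 * following zones.
 */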
3194
3195static int check_zbc_access_params(struct scsi_cmnd *scp,
3196 unsigned long long lba, unsigned int num, bool write)
3197{
3198 struct scsi_device *sdp = scp->device;
3199 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3200 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3201 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3202
3203 if (!write) {
3204 if (devip->zmodel == BLK_ZONED_HA)
3205 return 0;
3206 /* For host-managed, reads cannot cross zone types boundaries */
3207 if (zsp->z_type != zsp_end->z_type) {
3208 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3209 LBA_OUT_OF_RANGE,
3210 READ_INVDATA_ASCQ);
3211 return check_condition_result;
3212 }
3213 return 0;
3214 }
3215
3216 /* Writing into a gap zone is not allowed */
3217 if (zbc_zone_is_gap(zsp)) {
3218 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3219 ATTEMPT_ACCESS_GAP);
3220 return check_condition_result;
3221 }
3222
3223 /* No restrictions for writes within conventional zones */
3224 if (zbc_zone_is_conv(zsp)) {
3225 if (!zbc_zone_is_conv(zsp_end)) {
3226 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3227 LBA_OUT_OF_RANGE,
3228 WRITE_BOUNDARY_ASCQ);
3229 return check_condition_result;
3230 }
3231 return 0;
3232 }
3233
3234 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3235 /* Writes cannot cross sequential zone boundaries */
3236 if (zsp_end != zsp) {
3237 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3238 LBA_OUT_OF_RANGE,
3239 WRITE_BOUNDARY_ASCQ);
3240 return check_condition_result;
3241 }
3242 /* Cannot write full zones */
3243 if (zsp->z_cond == ZC5_FULL) {
3244 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3245 INVALID_FIELD_IN_CDB, 0);
3246 return check_condition_result;
3247 }
3248 /* Writes must be aligned to the zone WP */
3249 if (lba != zsp->z_wp) {
3250 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3251 LBA_OUT_OF_RANGE,
3252 UNALIGNED_WRITE_ASCQ);
3253 return check_condition_result;
3254 }
3255 }
3256
3257 /* Handle implicit open of closed and empty zones */
3258 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3259 if (devip->max_open &&
3260 devip->nr_exp_open >= devip->max_open) {
3261 mk_sense_buffer(scp, DATA_PROTECT,
3262 INSUFF_RES_ASC,
3263 INSUFF_ZONE_ASCQ);
3264 return check_condition_result;
3265 }
3266 zbc_open_zone(devip, zsp, explicit: false);
3267 }
3268
3269 return 0;
3270}
3271
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}

/*
 * Note: if BUG_ON() fires it usually indicates a problem with the parser
 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
 * that access any of the "stores" in struct sdeb_store_info should call this
 * function with bug_if_fake_rw set to true.
 */
static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
						bool bug_if_fake_rw)
{
	if (sdebug_fake_rw) {
		BUG_ON(bug_if_fake_rw);	/* See note above */
		return NULL;
	}
	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
}

/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			     fsp + (block * sdebug_sector_size),
			     (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
				      fsp, rest * sdebug_sector_size,
				      sg_skip + ((num - rest) * sdebug_sector_size),
				      do_write);
	}

	return ret;
}
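
/*
 * Illustrative example of the wrap-around above: with a fake store of
 * sdebug_store_sectors == 0x2000, a 16 sector access at LBA 0x3ff8 maps
 * to block 0x1ff8 (the do_div remainder), so block + num exceeds the
 * store by rest == 8 sectors; the first 8 sectors are copied at the end
 * of the store and the remaining 8 from its start. Devices larger than
 * the store simply wrap onto the same backing memory.
 */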

/* Returns number of bytes copied or -1 if error. */
static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
{
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;
	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
			      num * sdebug_sector_size, 0, true);
}

/* If the num blocks in sip->storep at lba compare equal to the first num
 * blocks of arr, then copy the top half of arr into sip->storep at lba
 * and return true. If the comparison fails then return false. */
static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
			      const u8 *arr, bool compare_only)
{
	bool res;
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;
	u8 *fsp = sip->storep;

	block = do_div(lba, store_blks);
	if (block + num > store_blks)
		rest = block + num - store_blks;

	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (!res)
		return res;
	if (rest)
		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
			      rest * lb_size);
	if (!res)
		return res;
	if (compare_only)
		return true;
	arr += num * lb_size;
	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
	return res;
}
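
/*
 * A usage note rather than spec text: callers hand comp_write_worker() a
 * buffer holding 2 * num blocks (see resp_comp_write() below, which sets
 * dnum = 2 * num): the first num blocks carry the verify data and the
 * second num blocks the replacement data, hence the "arr += num * lb_size"
 * before the copy. resp_verify() passes compare_only=true so the store is
 * never modified on that path.
 */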

static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (sdebug_guard)
		csum = (__force __be16)ip_compute_csum(buf, len);
	else
		csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}

static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
		       (unsigned long)sector,
		       be16_to_cpu(sdt->guard_tag),
		       be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
		       (unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
		       (unsigned long)sector);
		return 0x03;
	}
	return 0;
}
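
/*
 * Note on the return values above: the callers reuse them directly as the
 * ASCQ paired with ASC 0x10, i.e. 0x01 becomes LOGICAL BLOCK GUARD CHECK
 * FAILED and 0x03 becomes LOGICAL BLOCK REFERENCE TAG CHECK FAILED (see
 * the mk_sense_buffer(..., 0x10, 1) and (..., 0x10, 3) calls in the read
 * and write response functions below).
 */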

static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}

static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}

static inline void
sdeb_read_lock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__acquire(&sip->macc_lck);
		else
			__acquire(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			read_lock(&sip->macc_lck);
		else
			read_lock(&sdeb_fake_rw_lck);
	}
}

static inline void
sdeb_read_unlock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__release(&sip->macc_lck);
		else
			__release(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			read_unlock(&sip->macc_lck);
		else
			read_unlock(&sdeb_fake_rw_lck);
	}
}

static inline void
sdeb_write_lock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__acquire(&sip->macc_lck);
		else
			__acquire(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			write_lock(&sip->macc_lck);
		else
			write_lock(&sdeb_fake_rw_lck);
	}
}

static inline void
sdeb_write_unlock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__release(&sip->macc_lck);
		else
			__release(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			write_unlock(&sip->macc_lck);
		else
			write_unlock(&sdeb_fake_rw_lck);
	}
}

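/*
 * When the no_rwlock module parameter is set these helpers intentionally
 * skip real locking; __acquire()/__release() are sparse annotations that
 * compile to nothing, so static analysis still sees balanced lock
 * contexts while the fast path avoids the rwlock. When there is no
 * per-store lock (sip == NULL, the fake_rw case) the helpers fall back
 * to the global sdeb_fake_rw_lck.
 */
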
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	sdeb_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}

static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
		       scsi_prot_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}

static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}

static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
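
/*
 * Worked example with hypothetical parameter values: with
 * sdebug_unmap_granularity == 4 and sdebug_unmap_alignment == 1,
 * provisioning blocks start at LBAs 1, 5, 9, ... LBAs 1..4 all map to
 * index 1 ((lba + 3) / 4) and map_index_to_lba(1) returns 4 - 3 == 1,
 * so the two functions are inverses on provisioning block boundaries.
 */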

static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, sip->map_storep);

	if (mapped)
		next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
		next = find_next_bit(sip->map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}

static void map_region(struct sdeb_store_info *sip, sector_t lba,
		       unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (index < map_size)
			set_bit(index, sip->map_storep);

		lba = map_index_to_lba(index + 1);
	}
}

static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {	/* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}

static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	sdeb_write_lock(sip);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	sdeb_write_unlock(sip);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}

/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			    wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;	/* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: LB Data Offset field bad\n",
				    my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: LBA range descriptors don't fit\n",
				    my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			    my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	sdeb_write_lock(sip);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				    my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
					    "%s: %s: sum of blocks > data provided\n",
					    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
				    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
				    my_name, num_by, ret);

		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}

static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	u8 *fs1p;
	u8 *fsp;

	sdeb_write_lock(sip);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		sdeb_write_unlock(sip);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	sdeb_write_unlock(sip);

	return 0;
}

static int resp_write_same_10(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u32 lba;
	u16 num;
	u32 ei_lba = 0;
	bool unmap = false;

	if (cmd[1] & 0x8) {
		if (sdebug_lbpws10 == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	lba = get_unaligned_be32(cmd + 2);
	num = get_unaligned_be16(cmd + 7);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
}

static int resp_write_same_16(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba = 0;
	bool unmap = false;
	bool ndob = false;

	if (cmd[1] & 0x8) {	/* UNMAP */
		if (sdebug_lbpws == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
		ndob = true;
	lba = get_unaligned_be64(cmd + 2);
	num = get_unaligned_be32(cmd + 10);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
}

/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; for future. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}

static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];	/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	sdeb_write_unlock(sip);
	kfree(arr);
	return retval;
}

struct unmap_block_desc {
	__be64 lba;
	__be32 blocks;
	__be32 __reserved;
};
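
/*
 * For reference (per SBC): the UNMAP parameter list is an 8 byte header
 * (bytes 0-1: unmap data length, bytes 2-3: block descriptor data length)
 * followed by the 16 byte descriptors above, so a single-descriptor
 * request has a payload length of 24 and resp_unmap() below derives
 * descriptors = (payload_len - 8) / 16.
 */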

static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say it's done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	sdeb_write_lock(sip);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	sdeb_write_unlock(sip);
	kfree(buf);

	return ret;
}

#define SDEBUG_GET_LBA_STATUS_LEN 32

static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 alloc_len, mapped, num;
	int ret;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(scp, lba, 1, false);
	if (ret)
		return ret;

	if (scsi_debug_lbp()) {
		struct sdeb_store_info *sip = devip2sip(devip, true);

		mapped = map_state(sip, lba, &num);
	} else {
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}

static int resp_sync_cache(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u32 num_blocks;
	u8 *cmd = scp->cmnd;

	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		num_blocks = get_unaligned_be16(cmd + 7);
	} else {				/* SYNCHRONIZE_CACHE(16) */
		lba = get_unaligned_be64(cmd + 2);
		num_blocks = get_unaligned_be32(cmd + 10);
	}
	if (lba + num_blocks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!write_since_sync || (cmd[1] & 0x2))
		res = SDEG_RES_IMMED_MASK;
	else		/* delay if write_since_sync and IMMED clear */
		write_since_sync = false;
	return res;
}

/*
 * Assuming the LBA+num_blocks is not out-of-range, this function will return
 * CONDITION MET if the specified blocks will/have fitted in the cache, and
 * a GOOD status otherwise. Model a disk with a big cache and yield
 * CONDITION MET. Actually tries to bring range in main memory into the
 * cache associated with the CPU(s).
 */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	sdeb_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	sdeb_read_unlock(sip);
fini:
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}

#define RL_BUCKET_ELEMS 8
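
/*
 * A sizing note: the REPORT LUNS response below is assembled in 64 byte
 * "buckets" (RL_BUCKET_ELEMS entries of sizeof(struct scsi_lun) == 8
 * bytes each). Because the 8 byte response header is the same size as
 * one LUN entry, the first bucket carries the header plus up to 7 LUNs
 * and each later bucket up to 8, which is why the loop starts with
 * j = 1 when k == 0.
 */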

/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}

static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}

#define RZONES_DESC_HD 64

/* Report zones depending on start LBA and reporting options */
static int resp_report_zones(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	unsigned int rep_max_zones, nrz = 0;
	int ret = 0;
	u32 alloc_len, rep_opts, rep_len;
	bool partial;
	u64 lba, zs_lba;
	u8 *arr = NULL, *desc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp = NULL;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	zs_lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);
	if (alloc_len == 0)
		return 0;	/* not an error */
	rep_opts = cmd[14] & 0x3f;
	partial = cmd[14] & 0x80;

	if (zs_lba >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}

	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);

	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	desc = arr + 64;
	for (lba = zs_lba; lba < sdebug_capacity;
	     lba = zsp->z_start + zsp->z_size) {
		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
			break;
		zsp = zbc_zone(devip, lba);
		switch (rep_opts) {
		case 0x00:
			/* All zones */
			break;
		case 0x01:
			/* Empty zones */
			if (zsp->z_cond != ZC1_EMPTY)
				continue;
			break;
		case 0x02:
			/* Implicit open zones */
			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
				continue;
			break;
		case 0x03:
			/* Explicit open zones */
			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
				continue;
			break;
		case 0x04:
			/* Closed zones */
			if (zsp->z_cond != ZC4_CLOSED)
				continue;
			break;
		case 0x05:
			/* Full zones */
			if (zsp->z_cond != ZC5_FULL)
				continue;
			break;
		case 0x06:
		case 0x07:
		case 0x10:
			/*
			 * Read-only, offline, reset WP recommended are
			 * not emulated: no zones to report.
			 */
			continue;
		case 0x11:
			/* non-seq-resource set */
			if (!zsp->z_non_seq_resource)
				continue;
			break;
		case 0x3e:
			/* All zones except gap zones. */
			if (zbc_zone_is_gap(zsp))
				continue;
			break;
		case 0x3f:
			/* Not write pointer (conventional) zones */
			if (zbc_zone_is_seq(zsp))
				continue;
			break;
		default:
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			ret = check_condition_result;
			goto fini;
		}

		if (nrz < rep_max_zones) {
			/* Fill zone descriptor */
			desc[0] = zsp->z_type;
			desc[1] = zsp->z_cond << 4;
			if (zsp->z_non_seq_resource)
				desc[1] |= 1 << 1;
			put_unaligned_be64((u64)zsp->z_size, desc + 8);
			put_unaligned_be64((u64)zsp->z_start, desc + 16);
			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
			desc += 64;
		}

		if (partial && nrz >= rep_max_zones)
			break;

		nrz++;
	}

	/* Report header */
	/* Zone list length. */
	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
	/* Maximum LBA */
	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
	/* Zone starting LBA granularity. */
	if (devip->zcap < devip->zsize)
		put_unaligned_be64(devip->zsize, arr + 16);

	rep_len = (unsigned long)desc - (unsigned long)arr;
	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));

fini:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}

/* Logic transplanted from tcmu-runner, file_zbc.c */
static void zbc_open_all(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC4_CLOSED)
			zbc_open_zone(devip, &devip->zstate[i], true);
	}
}

static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}

static void zbc_close_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_close_zone(devip, &devip->zstate[i]);
}

static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	sdeb_write_unlock(sip);
	return res;
}

static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}

static void zbc_finish_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_finish_zone(devip, &devip->zstate[i], false);
}

static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_finish_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
5123
5124static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5125 struct sdeb_zone_state *zsp)
5126{
5127 enum sdebug_z_cond zc;
5128 struct sdeb_store_info *sip = devip2sip(devip, bug_if_fake_rw: false);
5129
5130 if (!zbc_zone_is_seq(zsp))
5131 return;
5132
5133 zc = zsp->z_cond;
5134 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5135 zbc_close_zone(devip, zsp);
5136
5137 if (zsp->z_cond == ZC4_CLOSED)
5138 devip->nr_closed--;
5139
5140 if (zsp->z_wp > zsp->z_start)
5141 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5142 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5143
5144 zsp->z_non_seq_resource = false;
5145 zsp->z_wp = zsp->z_start;
5146 zsp->z_cond = ZC1_EMPTY;
5147}
5148
5149static void zbc_rwp_all(struct sdebug_dev_info *devip)
5150{
5151 unsigned int i;
5152
5153 for (i = 0; i < devip->nr_zones; i++)
5154 zbc_rwp_zone(devip, zsp: &devip->zstate[i]);
5155}
5156
5157static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5158{
5159 struct sdeb_zone_state *zsp;
5160 int res = 0;
5161 u64 z_id;
5162 u8 *cmd = scp->cmnd;
5163 bool all = cmd[14] & 0x01;
5164 struct sdeb_store_info *sip = devip2sip(devip, bug_if_fake_rw: false);
5165
5166 if (!sdebug_dev_is_zoned(devip)) {
5167 mk_sense_invalid_opcode(scp);
5168 return check_condition_result;
5169 }
5170
5171 sdeb_write_lock(sip);
5172
5173 if (all) {
5174 zbc_rwp_all(devip);
5175 goto fini;
5176 }
5177
5178 z_id = get_unaligned_be64(p: cmd + 2);
5179 if (z_id >= sdebug_capacity) {
5180 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, asq: 0);
5181 res = check_condition_result;
5182 goto fini;
5183 }
5184
5185 zsp = zbc_zone(devip, lba: z_id);
5186 if (z_id != zsp->z_start) {
5187 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, asq: 0);
5188 res = check_condition_result;
5189 goto fini;
5190 }
5191 if (zbc_zone_is_conv(zsp)) {
5192 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, asq: 0);
5193 res = check_condition_result;
5194 goto fini;
5195 }
5196
5197 zbc_rwp_zone(devip, zsp);
5198fini:
5199 sdeb_write_unlock(sip);
5200 return res;
5201}
5202
5203static u32 get_tag(struct scsi_cmnd *cmnd)
5204{
5205 return blk_mq_unique_tag(rq: scsi_cmd_to_rq(scmd: cmnd));
5206}
5207
5208/* Queued (deferred) command completions converge here. */
5209static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5210{
5211 struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5212 unsigned long flags;
5213 struct scsi_cmnd *scp = sqcp->scmd;
5214 struct sdebug_scsi_cmd *sdsc;
5215 bool aborted;
5216
5217 if (sdebug_statistics) {
5218 atomic_inc(v: &sdebug_completions);
5219 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5220 atomic_inc(v: &sdebug_miss_cpus);
5221 }
5222
5223 if (!scp) {
5224 pr_err("scmd=NULL\n");
5225 goto out;
5226 }
5227
5228 sdsc = scsi_cmd_priv(cmd: scp);
5229 spin_lock_irqsave(&sdsc->lock, flags);
5230 aborted = sd_dp->aborted;
5231 if (unlikely(aborted))
5232 sd_dp->aborted = false;
5233 ASSIGN_QUEUED_CMD(scp, NULL);
5234
5235 spin_unlock_irqrestore(lock: &sdsc->lock, flags);
5236
5237 if (aborted) {
5238 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5239 blk_abort_request(scsi_cmd_to_rq(scmd: scp));
5240 goto out;
5241 }
5242
5243 scsi_done(cmd: scp); /* callback to mid level */
5244out:
5245 sdebug_free_queued_cmd(sqcp);
5246}
5247
5248/* When high resolution timer goes off this function is called. */
5249static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5250{
5251 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5252 hrt);
5253 sdebug_q_cmd_complete(sd_dp);
5254 return HRTIMER_NORESTART;
5255}
5256
5257/* When work queue schedules work, it calls this function. */
5258static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5259{
5260 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5261 ew.work);
5262 sdebug_q_cmd_complete(sd_dp);
5263}
5264
5265static bool got_shared_uuid;
5266static uuid_t shared_uuid;
5267
5268static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5269{
5270 struct sdeb_zone_state *zsp;
5271 sector_t capacity = get_sdebug_capacity();
5272 sector_t conv_capacity;
5273 sector_t zstart = 0;
5274 unsigned int i;
5275
5276 /*
5277 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5278 * a zone size allowing for at least 4 zones on the device. Otherwise,
5279 * use the specified zone size checking that at least 2 zones can be
5280 * created for the device.
5281 */
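	/*
	 * Worked example (illustrative, assuming DEF_ZBC_ZONE_SIZE_MB is 128
	 * and 512-byte logical blocks): the default zsize starts at 262144
	 * blocks; for a 64 MiB store (131072 blocks) the loop below halves
	 * it to 32768 blocks, yielding exactly 4 zones.
	 */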
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	if (sdeb_zbc_zone_cap_mb == 0) {
		devip->zcap = devip->zsize;
	} else {
		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
			      ilog2(sdebug_sector_size);
		if (devip->zcap > devip->zsize) {
			pr_err("Zone capacity too large\n");
			return -EINVAL;
		}
	}

	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
	if (conv_capacity >= capacity) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;
	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
			      devip->zsize_shift;
	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;

	/* Add gap zones if zone capacity is smaller than the zone size */
	if (devip->zcap < devip->zsize)
		devip->nr_zones += devip->nr_seq_zones;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZTYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size =
				min_t(u64, devip->zsize, capacity - zstart);
		} else if ((zstart & (devip->zsize - 1)) == 0) {
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZTYPE_SWR;
			else
				zsp->z_type = ZBC_ZTYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
			zsp->z_size =
				min_t(u64, devip->zcap, capacity - zstart);
		} else {
			zsp->z_type = ZBC_ZTYPE_GAP;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
					    capacity - zstart);
		}

		WARN_ON_ONCE((int)zsp->z_size <= 0);
		zstart += zsp->z_size;
	}

	return 0;
}

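/*
 * Allocate and initialize per-LUN state. Depending on sdebug_uuid_ctl the
 * LU name is a fresh UUID, a UUID shared by all LUs, or left zeroed; zone
 * state is created when a ZBC model is in use. The new entry is added to
 * the owning host's dev_info_list.
 */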
static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		if (sdebug_uuid_ctl == 1)
			uuid_gen(&devip->lu_name);
		else if (sdebug_uuid_ctl == 2) {
			if (got_shared_uuid)
				devip->lu_name = shared_uuid;
			else {
				uuid_gen(&shared_uuid);
				got_shared_uuid = true;
				devip->lu_name = shared_uuid;
			}
		}
		devip->sdbg_host = sdbg_host;
		if (sdeb_zbc_in_use) {
			devip->zmodel = sdeb_zbc_model;
			if (sdebug_device_create_zones(devip)) {
				kfree(devip);
				return NULL;
			}
		} else {
			devip->zmodel = BLK_ZONED_NONE;
		}
		devip->create_ts = ktime_get_boottime();
		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
		spin_lock_init(&devip->list_lock);
		INIT_LIST_HEAD(&devip->inject_err_list);
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}

static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *open_devip = NULL;
	struct sdebug_dev_info *devip;

	sdbg_host = shost_to_sdebug_host(sdev->host);

	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if ((devip->used) && (devip->channel == sdev->channel) &&
		    (devip->target == sdev->id) &&
		    (devip->lun == sdev->lun))
			return devip;
		else {
			if ((!devip->used) && (!open_devip))
				open_devip = devip;
		}
	}
	if (!open_devip) { /* try and make a new one */
		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
		if (!open_devip) {
			pr_err("out of memory at line %d\n", __LINE__);
			return NULL;
		}
	}

	open_devip->channel = sdev->channel;
	open_devip->target = sdev->id;
	open_devip->lun = sdev->lun;
	open_devip->sdbg_host = sdbg_host;
	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
	open_devip->used = true;
	return open_devip;
}

static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);

	return 0;
}

static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;
	struct dentry *dentry;

	if (sdebug_verbose)
		pr_info("slave_configure <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	sdp->hostdata = devip;
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	config_cdb_len(sdp);

	if (sdebug_allow_restart)
		sdp->allow_restart = 1;

	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
						  sdebug_debugfs_root);
	if (IS_ERR_OR_NULL(devip->debugfs_entry))
		pr_info("%s: failed to create debugfs directory for device %s\n",
			__func__, dev_name(&sdp->sdev_gendev));

	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
				     &sdebug_error_fops);
	if (IS_ERR_OR_NULL(dentry))
		pr_info("%s: failed to create error file for device %s\n",
			__func__, dev_name(&sdp->sdev_gendev));

	return 0;
}

static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;

	if (sdebug_verbose)
		pr_info("slave_destroy <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);

	if (!devip)
		return;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		list_del_rcu(&err->list);
		call_rcu(&err->rcu, sdebug_err_free);
	}
	spin_unlock(&devip->list_lock);

	debugfs_remove(devip->debugfs_entry);

	/* make this slot available for re-use */
	devip->used = false;
	sdp->hostdata = NULL;
}

/* Returns true if we require the queued memory to be freed by the caller. */
static bool stop_qc_helper(struct sdebug_defer *sd_dp,
			   enum sdeb_defer_type defer_t)
{
	if (defer_t == SDEB_DEFER_HRT) {
		int res = hrtimer_try_to_cancel(&sd_dp->hrt);

		switch (res) {
		case 0: /* Not active, it must have already run */
		case -1: /* -1 It's executing the CB */
			return false;
		case 1: /* Was active, we've now cancelled */
		default:
			return true;
		}
	} else if (defer_t == SDEB_DEFER_WQ) {
		/* Cancel if pending */
		if (cancel_work_sync(&sd_dp->ew.work))
			return true;
		/* Was not pending, so it must have run */
		return false;
	} else if (defer_t == SDEB_DEFER_POLL) {
		return true;
	}

	return false;
}


static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
	enum sdeb_defer_type l_defer_t;
	struct sdebug_defer *sd_dp;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);

	lockdep_assert_held(&sdsc->lock);

	if (!sqcp)
		return false;
	sd_dp = &sqcp->sd_dp;
	l_defer_t = READ_ONCE(sd_dp->defer_t);
	ASSIGN_QUEUED_CMD(cmnd, NULL);

	if (stop_qc_helper(sd_dp, l_defer_t))
		sdebug_free_queued_cmd(sqcp);

	return true;
}

/*
 * Called from scsi_debug_abort() only, which is for timed-out cmd.
 */
static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&sdsc->lock, flags);
	res = scsi_debug_stop_cmnd(cmnd);
	spin_unlock_irqrestore(&sdsc->lock, flags);

	return res;
}

/*
 * All we can do is set the cmnd as internally aborted and wait for it to
 * finish. We cannot call scsi_done() as normal completion path may do that.
 */
static bool sdebug_stop_cmnd(struct request *rq, void *data)
{
	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));

	return true;
}

/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	struct sdebug_host_info *sdhp;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		struct Scsi_Host *shost = sdhp->shost;

		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_ABORT_CMD_FAILED &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = !!err->cnt;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
	bool ok = scsi_debug_abort_cmnd(SCpnt);
	u8 *cmd = SCpnt->cmnd;
	u8 opcode = cmd[0];

	++num_aborts;

	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: command%s found\n", __func__,
			    ok ? "" : " not");

	if (sdebug_fail_abort(SCpnt)) {
		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
			    opcode);
		return FAILED;
	}

	return SUCCESS;
}

static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
{
	struct scsi_device *sdp = data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	if (scmd->device == sdp)
		scsi_debug_abort_cmnd(scmd);

	return true;
}

/* Deletes (stops) timers or work queues of all queued commands per sdev */
static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
{
	struct Scsi_Host *shost = sdp->host;

	blk_mq_tagset_busy_iter(&shost->tag_set,
				scsi_debug_stop_all_queued_iter, sdp);
}

static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_LUN_RESET_FAILED &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = !!err->cnt;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
	struct scsi_device *sdp = SCpnt->device;
	struct sdebug_dev_info *devip = sdp->hostdata;
	u8 *cmd = SCpnt->cmnd;
	u8 opcode = cmd[0];

	++num_dev_resets;

	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);

	scsi_debug_stop_all_queued(sdp);
	if (devip)
		set_bit(SDEBUG_UA_POR, devip->uas_bm);

	if (sdebug_fail_lun_reset(SCpnt)) {
		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
		return FAILED;
	}

	return SUCCESS;
}

static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
{
	struct scsi_target *starget = scsi_target(cmnd->device);
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		return targetip->reset_fail;

	return 0;
}

static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
	struct scsi_device *sdp = SCpnt->device;
	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
	struct sdebug_dev_info *devip;
	u8 *cmd = SCpnt->cmnd;
	u8 opcode = cmd[0];
	int k = 0;

	++num_target_resets;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);

	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if (devip->target == sdp->id) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			++k;
		}
	}

	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in target\n", __func__, k);

	if (sdebug_fail_target_reset(SCpnt)) {
		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
			    opcode);
		return FAILED;
	}

	return SUCCESS;
}

static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
{
	struct scsi_device *sdp = SCpnt->device;
	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
	struct sdebug_dev_info *devip;
	int k = 0;

	++num_bus_resets;

	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);

	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		++k;
	}

	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in host\n", __func__, k);
	return SUCCESS;
}

static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	int k = 0;

	++num_host_resets;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(devip, &sdbg_host->dev_info_list,
				    dev_list) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			++k;
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
	stop_all_queued();
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: %d device(s) found\n", __func__, k);
	return SUCCESS;
}

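/*
 * Write a minimal MSDOS partition table into the first block of the ram
 * store: up to SDEBUG_MAX_PARTS primary partitions of roughly equal size,
 * with start/end CHS values derived from the simulated geometry.
 */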
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}

static void block_unblock_all_queues(bool block)
{
	struct sdebug_host_info *sdhp;

	lockdep_assert_held(&sdebug_host_list_mutex);

	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		struct Scsi_Host *shost = sdhp->shost;

		if (block)
			scsi_block_requests(shost);
		else
			scsi_unblock_requests(shost);
	}
}

/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
 * commands will be processed normally before triggers occur.
 */
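/*
 * For example (illustrative): with every_nth=100 and a current command
 * count of 2345, the count is rounded down to the nearest multiple of
 * every_nth, i.e. (2345 / 100) * 100 = 2300.
 */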
static void tweak_cmnd_count(void)
{
	int count, modulo;

	modulo = abs(sdebug_every_nth);
	if (modulo < 2)
		return;

	mutex_lock(&sdebug_host_list_mutex);
	block_unblock_all_queues(true);
	count = atomic_read(&sdebug_cmnd_count);
	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
	block_unblock_all_queues(false);
	mutex_unlock(&sdebug_host_list_mutex);
}

static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}

static bool inject_on_this_cmd(void)
{
	if (sdebug_every_nth == 0)
		return false;
	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}

#define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */


void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
{
	if (sqcp)
		kmem_cache_free(queued_cmd_cache, sqcp);
}

static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
{
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_defer *sd_dp;

	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
	if (!sqcp)
		return NULL;

	sd_dp = &sqcp->sd_dp;

	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);

	sqcp->scmd = scmd;

	return sqcp;
}

/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling scsi_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	u64 ns_from_boot = 0;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;


	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    __func__, num_in_q);
		}
	}

	sqcp = sdebug_alloc_queued_cmd(cmnd);
	if (!sqcp) {
		pr_err("%s no alloc\n", __func__);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	sd_dp = &sqcp->sd_dp;

	if (polled)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* call scsi_done() from this thread */
					sdebug_free_queued_cmd(sqcp);
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}

/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
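/*
 * Example usage (illustrative): load the driver with a 256 MiB store and
 * four targets, then change options at runtime via the driver attribute:
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=4 opts=1
 *   echo 0x6 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *
 * opts bit values are listed in the 'opts' parameter description below.
 */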
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");

#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];

static const char *scsi_debug_info(struct Scsi_Host *shp)
{
	int k;

	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
		      my_name, SDEBUG_VERSION, sdebug_version_date);
	if (k >= (SDEBUG_INFO_LEN - 1))
		return sdebug_info;
	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
		  " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
		  "statistics", (int)sdebug_statistics);
	return sdebug_info;
}

/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
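/* e.g. 'echo 1 > /proc/scsi/scsi_debug/0' (host_id 0 is illustrative) turns on noise logging */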
static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
				 int length)
{
	char arr[16];
	int opts;
	int minLen = length > 15 ? 15 : length;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	memcpy(arr, buffer, minLen);
	arr[minLen] = '\0';
	if (1 != sscanf(arr, "%d", &opts))
		return -EINVAL;
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	if (sdebug_every_nth != 0)
		tweak_cmnd_count();
	return length;
}

struct sdebug_submit_queue_data {
	int *first;
	int *last;
	int queue_num;
};

static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
{
	struct sdebug_submit_queue_data *data = opaque;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
	int queue_num = data->queue_num;

	if (hwq != queue_num)
		return true;

	/* Rely on iter'ing in ascending tag order */
	if (*data->first == -1)
		*data->first = *data->last = tag;
	else
		*data->last = tag;

	return true;
}

/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output here are not atomic so may be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, "  queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? " not_in_use" : ""));
			++j;
		}
	}
	return 0;
}

static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
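/* e.g. 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay' (illustrative)
 * makes responses immediate, per the 'delay' parameter description above.
 */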
6437static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6438 size_t count)
6439{
6440 int jdelay, res;
6441
6442 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6443 res = count;
6444 if (sdebug_jdelay != jdelay) {
6445 struct sdebug_host_info *sdhp;
6446
6447 mutex_lock(&sdebug_host_list_mutex);
6448 block_unblock_all_queues(block: true);
6449
6450 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6451 struct Scsi_Host *shost = sdhp->shost;
6452
6453 if (scsi_host_busy(shost)) {
6454 res = -EBUSY; /* queued commands */
6455 break;
6456 }
6457 }
6458 if (res > 0) {
6459 sdebug_jdelay = jdelay;
6460 sdebug_ndelay = 0;
6461 }
6462 block_unblock_all_queues(block: false);
6463 mutex_unlock(lock: &sdebug_host_list_mutex);
6464 }
6465 return res;
6466 }
6467 return -EINVAL;
6468}
6469static DRIVER_ATTR_RW(delay);
6470
6471static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6472{
6473 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_ndelay);
6474}
6475/* Returns -EBUSY if ndelay is being changed and commands are queued */
6476/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6477static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6478 size_t count)
6479{
6480 int ndelay, res;
6481
6482 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6483 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6484 res = count;
6485 if (sdebug_ndelay != ndelay) {
6486 struct sdebug_host_info *sdhp;
6487
6488 mutex_lock(&sdebug_host_list_mutex);
6489 block_unblock_all_queues(block: true);
6490
6491 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6492 struct Scsi_Host *shost = sdhp->shost;
6493
6494 if (scsi_host_busy(shost)) {
6495 res = -EBUSY; /* queued commands */
6496 break;
6497 }
6498 }
6499
6500 if (res > 0) {
6501 sdebug_ndelay = ndelay;
6502 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6503 : DEF_JDELAY;
6504 }
6505 block_unblock_all_queues(block: false);
6506 mutex_unlock(lock: &sdebug_host_list_mutex);
6507 }
6508 return res;
6509 }
6510 return -EINVAL;
6511}
6512static DRIVER_ATTR_RW(ndelay);
6513
6514static ssize_t opts_show(struct device_driver *ddp, char *buf)
6515{
6516 return scnprintf(buf, PAGE_SIZE, fmt: "0x%x\n", sdebug_opts);
6517}
6518
6519static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6520 size_t count)
6521{
6522 int opts;
6523 char work[20];
6524
6525 if (sscanf(buf, "%10s", work) == 1) {
6526 if (strncasecmp(s1: work, s2: "0x", n: 2) == 0) {
6527 if (kstrtoint(s: work + 2, base: 16, res: &opts) == 0)
6528 goto opts_done;
6529 } else {
6530 if (kstrtoint(s: work, base: 10, res: &opts) == 0)
6531 goto opts_done;
6532 }
6533 }
6534 return -EINVAL;
6535opts_done:
6536 sdebug_opts = opts;
6537 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6538 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6539 tweak_cmnd_count();
6540 return count;
6541}
6542static DRIVER_ATTR_RW(opts);
6543
6544static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6545{
6546 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_ptype);
6547}
6548static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6549 size_t count)
6550{
6551 int n;
6552
6553 /* Cannot change from or to TYPE_ZBC with sysfs */
6554 if (sdebug_ptype == TYPE_ZBC)
6555 return -EINVAL;
6556
6557 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6558 if (n == TYPE_ZBC)
6559 return -EINVAL;
6560 sdebug_ptype = n;
6561 return count;
6562 }
6563 return -EINVAL;
6564}
6565static DRIVER_ATTR_RW(ptype);
6566
6567static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6568{
6569 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_dsense);
6570}
6571static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6572 size_t count)
6573{
6574 int n;
6575
6576 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6577 sdebug_dsense = n;
6578 return count;
6579 }
6580 return -EINVAL;
6581}
6582static DRIVER_ATTR_RW(dsense);
6583
6584static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6585{
6586 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_fake_rw);
6587}
6588static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6589 size_t count)
6590{
6591 int n, idx;
6592
6593 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6594 bool want_store = (n == 0);
6595 struct sdebug_host_info *sdhp;
6596
6597 n = (n > 0);
6598 sdebug_fake_rw = (sdebug_fake_rw > 0);
6599 if (sdebug_fake_rw == n)
6600 return count; /* not transitioning so do nothing */
6601
6602 if (want_store) { /* 1 --> 0 transition, set up store */
6603 if (sdeb_first_idx < 0) {
6604 idx = sdebug_add_store();
6605 if (idx < 0)
6606 return idx;
6607 } else {
6608 idx = sdeb_first_idx;
6609 xa_clear_mark(per_store_ap, index: idx,
6610 SDEB_XA_NOT_IN_USE);
6611 }
6612 /* make all hosts use same store */
6613 list_for_each_entry(sdhp, &sdebug_host_list,
6614 host_list) {
6615 if (sdhp->si_idx != idx) {
6616 xa_set_mark(per_store_ap, index: sdhp->si_idx,
6617 SDEB_XA_NOT_IN_USE);
6618 sdhp->si_idx = idx;
6619 }
6620 }
6621 sdeb_most_recent_idx = idx;
6622 } else { /* 0 --> 1 transition is trigger for shrink */
6623 sdebug_erase_all_stores(apart_from_first: true /* apart from first */);
6624 }
6625 sdebug_fake_rw = n;
6626 return count;
6627 }
6628 return -EINVAL;
6629}
6630static DRIVER_ATTR_RW(fake_rw);
6631
6632static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6633{
6634 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_no_lun_0);
6635}
6636static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6637 size_t count)
6638{
6639 int n;
6640
6641 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6642 sdebug_no_lun_0 = n;
6643 return count;
6644 }
6645 return -EINVAL;
6646}
6647static DRIVER_ATTR_RW(no_lun_0);
6648
6649static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6650{
6651 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_num_tgts);
6652}
6653static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6654 size_t count)
6655{
6656 int n;
6657
6658 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6659 sdebug_num_tgts = n;
6660 sdebug_max_tgts_luns();
6661 return count;
6662 }
6663 return -EINVAL;
6664}
6665static DRIVER_ATTR_RW(num_tgts);
6666
6667static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6668{
6669 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_dev_size_mb);
6670}
6671static DRIVER_ATTR_RO(dev_size_mb);
6672
6673static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6674{
6675 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_per_host_store);
6676}
6677
6678static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6679 size_t count)
6680{
6681 bool v;
6682
6683 if (kstrtobool(s: buf, res: &v))
6684 return -EINVAL;
6685
6686 sdebug_per_host_store = v;
6687 return count;
6688}
6689static DRIVER_ATTR_RW(per_host_store);
6690
6691static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6692{
6693 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_num_parts);
6694}
6695static DRIVER_ATTR_RO(num_parts);
6696
6697static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6698{
6699 return scnprintf(buf, PAGE_SIZE, fmt: "%d\n", sdebug_every_nth);
6700}
6701static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6702 size_t count)
6703{
6704 int nth;
6705 char work[20];
6706
6707 if (sscanf(buf, "%10s", work) == 1) {
6708 if (strncasecmp(s1: work, s2: "0x", n: 2) == 0) {
6709 if (kstrtoint(s: work + 2, base: 16, res: &nth) == 0)
6710 goto every_nth_done;
6711 } else {
6712 if (kstrtoint(s: work, base: 10, res: &nth) == 0)
6713 goto every_nth_done;
6714 }
6715 }
6716 return -EINVAL;
6717
6718every_nth_done:
6719 sdebug_every_nth = nth;
6720 if (nth && !sdebug_statistics) {
6721 pr_info("every_nth needs statistics=1, set it\n");
6722 sdebug_statistics = true;
6723 }
6724 tweak_cmnd_count();
6725 return count;
6726}
6727static DRIVER_ATTR_RW(every_nth);

static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);

static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);

static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		mutex_lock(&sdebug_host_list_mutex);

		/* We may only change sdebug_max_queue when we have no shosts */
		if (list_empty(&sdebug_host_list))
			sdebug_max_queue = n;
		else
			count = -EBUSY;
		mutex_unlock(&sdebug_host_list_mutex);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
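
/*
 * Example (illustrative): assuming host_max_queue is 0, max_queue_store()
 * returns -EBUSY while any simulated host exists, so a resize only
 * succeeds once the host list is empty, e.g. with one host present:
 *
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo 64 > /sys/bus/pseudo/drivers/scsi_debug/max_queue
 *   echo 1  > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */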

static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
}

static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_no_rwlock = v;
	return count;
}
static DRIVER_ATTR_RW(no_rwlock);

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);

static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);

static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);

static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
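
/*
 * Example (illustrative): a positive value written to add_host adds that
 * many simulated hosts and a negative value removes hosts; reading back
 * reports how many are currently active:
 *
 *   echo 2  > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   cat /sys/bus/pseudo/drivers/scsi_debug/add_host
 *
 * With per_host_store=1 (and fake_rw=0), each added host gets its own
 * backing store, re-using a store marked not-in-use when one exists.
 */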

static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);

static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;

	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
		if (n > 0)
			sdebug_statistics = true;
		else {
			clear_queue_stats();
			sdebug_statistics = false;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(statistics);

static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);

static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);

static ssize_t random_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
}

static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_random = v;
	return count;
}
static DRIVER_ATTR_RW(random);

static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);

static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);

static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);

static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);

static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);

static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};

static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}
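
/*
 * Worked example (illustrative): the helper above accepts any of three
 * spellings per model, so "host-managed", "managed" and "2" all resolve
 * to BLK_ZONED_HM. At load time that would be, e.g.:
 *
 *   modprobe scsi_debug zbc=managed
 *
 * (the zbc=XXX form is the one mentioned in scsi_debug_init() below).
 */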

static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);

/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
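
/*
 * Example of the contrast described above (illustrative): a value given at
 * load time, e.g.
 *
 *   modprobe scsi_debug every_nth=100
 *
 * shows up under /sys/module/scsi_debug/parameters/, whereas writing the
 * like-named file under /sys/bus/pseudo/drivers/scsi_debug/ goes through
 * the *_store() functions above and so can trigger their side effects.
 */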

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);

static struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
	if (!queued_cmd_cache) {
		ret = -ENOMEM;
		goto driver_unreg;
	}

	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("%s: failed to create initial debugfs directory\n", __func__);

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

driver_unreg:
	driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
	return ret;
}

static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	for (; k; k--)
		sdebug_do_remove_host(true);
	kmem_cache_destroy(queued_cmd_cache);
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	debugfs_remove(sdebug_debugfs_root);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = dev_to_sdebug_host(dev);
	kfree(sdbg_host);
}

/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}

/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}

/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}

static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}

static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}

static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}

static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	struct sdebug_dev_info *devip = sdev->hostdata;

	if (!devip)
		return -ENODEV;

	mutex_lock(&sdebug_host_list_mutex);
	block_unblock_all_queues(true);

	if (qdepth > SDEBUG_CANQUEUE) {
		qdepth = SDEBUG_CANQUEUE;
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
			qdepth, SDEBUG_CANQUEUE);
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	block_unblock_all_queues(false);
	mutex_unlock(&sdebug_host_list_mutex);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);

	return sdev->queue_depth;
}

static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true;	/* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true;	/* time out reads and writes */
	}
	return false;
}
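
/*
 * Worked example (illustrative): with every_nth=100 and the
 * SDEBUG_OPT_TIMEOUT bit set in opts, the test above is true whenever the
 * command count is a multiple of 100, so every 100th command is silently
 * ignored to provoke mid-level timeout handling.
 */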

/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
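
/*
 * Worked example (illustrative): with tur_ms_to_ready=2000 and the device
 * in stopped state 2, a TEST UNIT READY issued 500 ms after the device was
 * created gets NOT READY sense (logical unit is in process of becoming
 * ready) with the sense information field set to the ~1500 ms remaining.
 */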

static void sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
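
/*
 * Worked example (illustrative): with submit_queues=4 and poll_queues=2,
 * the loop above gives HCTX_TYPE_DEFAULT 2 queues at queue_offset 0 and
 * HCTX_TYPE_POLL 2 queues at queue_offset 2; HCTX_TYPE_READ gets no
 * queues and is skipped.
 */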

struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;
	int *num_entries;
};

/*
 * We don't handle aborted commands here, but it does not seem possible to have
 * aborted polled commands from schedule_resp()
 */
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
	struct sdebug_blk_mq_poll_data *data = opaque;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	struct sdebug_queued_cmd *sqcp;
	unsigned long flags;
	int queue_num = data->queue_num;
	ktime_t time;

	/* We're only interested in one queue for this iteration */
	if (hwq != queue_num)
		return true;

	/* Subsequent checks would fail if this failed, but check anyway */
	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		return true;

	time = ktime_get_boottime();

	spin_lock_irqsave(&sdsc->lock, flags);
	sqcp = TO_QUEUED_CMD(cmd);
	if (!sqcp) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	sd_dp = &sqcp->sd_dp;
	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	if (time < sd_dp->cmpl_ts) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	ASSIGN_QUEUED_CMD(cmd, NULL);
	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	sdebug_free_queued_cmd(sqcp);

	scsi_done(cmd); /* callback to mid level */
	(*data->num_entries)++;
	return true;
}

static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct sdebug_blk_mq_poll_data data = {
		.queue_num = queue_num,
		.num_entries = &num_entries,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
				&data);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}

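/*
 * The error-injection helpers below share one counter convention (a
 * reading of the code, stated here for clarity): err->cnt == 0 disables
 * the entry, a negative count is incremented toward zero so injection
 * fires a limited number of times, and a positive count injects on every
 * matching command.
 */
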
static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_TMOUT_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = !!err->cnt;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_QUEUE_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = err->cnt ? err->queuecmd_ret : 0;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
			   struct sdebug_err_inject *info)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;
	int result;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			if (!err->cnt) {
				rcu_read_unlock();
				return 0;
			}

			ret = !!err->cnt;
			rcu_read_unlock();
			goto out_handle;
		}
	}
	rcu_read_unlock();

	return 0;

out_handle:
	if (err->cnt < 0)
		err->cnt++;
	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
	*info = *err;
	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);

	return ret;
}

static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;
	int ret = 0;
	struct sdebug_err_inject err;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}

	if (sdebug_timeout_cmd(scp)) {
		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
		return 0;
	}

	ret = sdebug_fail_queue_cmd(scp);
	if (ret) {
		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
			    opcode, ret);
		return ret;
	}

	if (sdebug_fail_cmd(scp, &ret, &err)) {
		scmd_printk(KERN_INFO, scp,
			    "fail command 0x%x with hostbyte=0x%x, "
			    "driverbyte=0x%x, statusbyte=0x%x, "
			    "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
			    opcode, err.host_byte, err.driver_byte,
			    err.status_byte, err.sense_key, err.asc, err.asq);
		return ret;
	}

	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}

static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);

	spin_lock_init(&sdsc->lock);

	return 0;
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.map_queues = sdebug_map_queues,
	.mq_poll = sdebug_blk_mq_poll,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.max_segment_size = -1U,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc = sdebug_target_alloc,
	.target_destroy = sdebug_target_destroy,
};

static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = dev_to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
