1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * NVM Express device driver |
4 | * Copyright (c) 2011-2014, Intel Corporation. |
5 | */ |
6 | |
7 | #include <linux/blkdev.h> |
8 | #include <linux/blk-mq.h> |
9 | #include <linux/blk-integrity.h> |
10 | #include <linux/compat.h> |
11 | #include <linux/delay.h> |
12 | #include <linux/errno.h> |
13 | #include <linux/hdreg.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/module.h> |
16 | #include <linux/backing-dev.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/types.h> |
19 | #include <linux/pr.h> |
20 | #include <linux/ptrace.h> |
21 | #include <linux/nvme_ioctl.h> |
22 | #include <linux/pm_qos.h> |
23 | #include <asm/unaligned.h> |
24 | |
25 | #include "nvme.h" |
26 | #include "fabrics.h" |
27 | #include <linux/nvme-auth.h> |
28 | #include <linux/nvme-keyring.h> |
29 | |
30 | #define CREATE_TRACE_POINTS |
31 | #include "trace.h" |
32 | |
33 | #define NVME_MINORS (1U << MINORBITS) |
34 | |
35 | struct nvme_ns_info { |
36 | struct nvme_ns_ids ids; |
37 | u32 nsid; |
38 | __le32 anagrpid; |
39 | bool is_shared; |
40 | bool is_readonly; |
41 | bool is_ready; |
42 | bool is_removed; |
43 | }; |
44 | |
45 | unsigned int admin_timeout = 60; |
46 | module_param(admin_timeout, uint, 0644); |
47 | MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands" ); |
48 | EXPORT_SYMBOL_GPL(admin_timeout); |
49 | |
50 | unsigned int nvme_io_timeout = 30; |
51 | module_param_named(io_timeout, nvme_io_timeout, uint, 0644); |
52 | MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O" ); |
53 | EXPORT_SYMBOL_GPL(nvme_io_timeout); |
54 | |
55 | static unsigned char shutdown_timeout = 5; |
56 | module_param(shutdown_timeout, byte, 0644); |
57 | MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown" ); |
58 | |
59 | static u8 nvme_max_retries = 5; |
60 | module_param_named(max_retries, nvme_max_retries, byte, 0644); |
61 | MODULE_PARM_DESC(max_retries, "max number of retries a command may have" ); |
62 | |
63 | static unsigned long default_ps_max_latency_us = 100000; |
64 | module_param(default_ps_max_latency_us, ulong, 0644); |
65 | MODULE_PARM_DESC(default_ps_max_latency_us, |
66 | "max power saving latency for new devices; use PM QOS to change per device" ); |
67 | |
68 | static bool force_apst; |
69 | module_param(force_apst, bool, 0644); |
70 | MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off" ); |
71 | |
72 | static unsigned long apst_primary_timeout_ms = 100; |
73 | module_param(apst_primary_timeout_ms, ulong, 0644); |
74 | MODULE_PARM_DESC(apst_primary_timeout_ms, |
75 | "primary APST timeout in ms" ); |
76 | |
77 | static unsigned long apst_secondary_timeout_ms = 2000; |
78 | module_param(apst_secondary_timeout_ms, ulong, 0644); |
79 | MODULE_PARM_DESC(apst_secondary_timeout_ms, |
80 | "secondary APST timeout in ms" ); |
81 | |
82 | static unsigned long apst_primary_latency_tol_us = 15000; |
83 | module_param(apst_primary_latency_tol_us, ulong, 0644); |
84 | MODULE_PARM_DESC(apst_primary_latency_tol_us, |
85 | "primary APST latency tolerance in us" ); |
86 | |
87 | static unsigned long apst_secondary_latency_tol_us = 100000; |
88 | module_param(apst_secondary_latency_tol_us, ulong, 0644); |
89 | MODULE_PARM_DESC(apst_secondary_latency_tol_us, |
90 | "secondary APST latency tolerance in us" ); |
91 | |
92 | /* |
93 | * nvme_wq - hosts nvme related works that are not reset or delete |
94 | * nvme_reset_wq - hosts nvme reset works |
95 | * nvme_delete_wq - hosts nvme delete works |
96 | * |
97 | * nvme_wq will host works such as scan, aen handling, fw activation, |
98 | * keep-alive, periodic reconnects etc. nvme_reset_wq |
99 | * runs reset works which also flush works hosted on nvme_wq for |
 * serialization purposes. nvme_delete_wq hosts controller deletion
101 | * works which flush reset works for serialization. |
102 | */ |
103 | struct workqueue_struct *nvme_wq; |
104 | EXPORT_SYMBOL_GPL(nvme_wq); |
105 | |
106 | struct workqueue_struct *nvme_reset_wq; |
107 | EXPORT_SYMBOL_GPL(nvme_reset_wq); |
108 | |
109 | struct workqueue_struct *nvme_delete_wq; |
110 | EXPORT_SYMBOL_GPL(nvme_delete_wq); |
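
/*
 * Illustrative sketch of the layering described above (not a real handler):
 * work running on nvme_reset_wq may flush work hosted on nvme_wq, and work
 * on nvme_delete_wq may flush reset work, but never the other way around.
 *
 *	static void example_reset_work(struct work_struct *work)
 *	{
 *		struct nvme_ctrl *ctrl =
 *			container_of(work, struct nvme_ctrl, reset_work);
 *
 *		flush_work(&ctrl->scan_work);		(hosted on nvme_wq)
 *		flush_work(&ctrl->async_event_work);	(hosted on nvme_wq)
 *		... perform the actual reset ...
 *	}
 */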
111 | |
112 | static LIST_HEAD(nvme_subsystems); |
113 | static DEFINE_MUTEX(nvme_subsystems_lock); |
114 | |
115 | static DEFINE_IDA(nvme_instance_ida); |
116 | static dev_t nvme_ctrl_base_chr_devt; |
117 | static struct class *nvme_class; |
118 | static struct class *nvme_subsys_class; |
119 | |
120 | static DEFINE_IDA(nvme_ns_chr_minor_ida); |
121 | static dev_t nvme_ns_chr_devt; |
122 | static struct class *nvme_ns_chr_class; |
123 | |
124 | static void nvme_put_subsystem(struct nvme_subsystem *subsys); |
125 | static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, |
126 | unsigned nsid); |
127 | static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, |
128 | struct nvme_command *cmd); |
129 | |
130 | void nvme_queue_scan(struct nvme_ctrl *ctrl) |
131 | { |
	/*
	 * Only queue new scan work when the admin and I/O queues are both alive.
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
137 | } |
138 | |
139 | /* |
140 | * Use this function to proceed with scheduling reset_work for a controller |
141 | * that had previously been set to the resetting state. This is intended for |
142 | * code paths that can't be interrupted by other reset attempts. A hot removal |
143 | * may prevent this from succeeding. |
144 | */ |
145 | int nvme_try_sched_reset(struct nvme_ctrl *ctrl) |
146 | { |
147 | if (ctrl->state != NVME_CTRL_RESETTING) |
148 | return -EBUSY; |
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
150 | return -EBUSY; |
151 | return 0; |
152 | } |
153 | EXPORT_SYMBOL_GPL(nvme_try_sched_reset); |
154 | |
155 | static void nvme_failfast_work(struct work_struct *work) |
156 | { |
157 | struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), |
158 | struct nvme_ctrl, failfast_work); |
159 | |
160 | if (ctrl->state != NVME_CTRL_CONNECTING) |
161 | return; |
162 | |
	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
165 | nvme_kick_requeue_lists(ctrl); |
166 | } |
167 | |
168 | static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl) |
169 | { |
170 | if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1) |
171 | return; |
172 | |
	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
175 | } |
176 | |
177 | static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl) |
178 | { |
179 | if (!ctrl->opts) |
180 | return; |
181 | |
	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
184 | } |
185 | |
186 | |
187 | int nvme_reset_ctrl(struct nvme_ctrl *ctrl) |
188 | { |
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
192 | return -EBUSY; |
193 | return 0; |
194 | } |
195 | EXPORT_SYMBOL_GPL(nvme_reset_ctrl); |
196 | |
197 | int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) |
198 | { |
199 | int ret; |
200 | |
201 | ret = nvme_reset_ctrl(ctrl); |
202 | if (!ret) { |
		flush_work(&ctrl->reset_work);
204 | if (ctrl->state != NVME_CTRL_LIVE) |
205 | ret = -ENETRESET; |
206 | } |
207 | |
208 | return ret; |
209 | } |
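
/*
 * Typical usage (illustrative): callers that need to know whether the reset
 * actually brought the controller back online use the synchronous variant:
 *
 *	ret = nvme_reset_ctrl_sync(ctrl);
 *	if (ret == -ENETRESET)
 *		... the controller did not return to LIVE ...
 */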
210 | |
211 | static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) |
212 | { |
213 | dev_info(ctrl->device, |
		"Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));

	flush_work(&ctrl->reset_work);
217 | nvme_stop_ctrl(ctrl); |
218 | nvme_remove_namespaces(ctrl); |
219 | ctrl->ops->delete_ctrl(ctrl); |
220 | nvme_uninit_ctrl(ctrl); |
221 | } |
222 | |
223 | static void nvme_delete_ctrl_work(struct work_struct *work) |
224 | { |
225 | struct nvme_ctrl *ctrl = |
226 | container_of(work, struct nvme_ctrl, delete_work); |
227 | |
228 | nvme_do_delete_ctrl(ctrl); |
229 | } |
230 | |
231 | int nvme_delete_ctrl(struct nvme_ctrl *ctrl) |
232 | { |
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
236 | return -EBUSY; |
237 | return 0; |
238 | } |
239 | EXPORT_SYMBOL_GPL(nvme_delete_ctrl); |
240 | |
241 | void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) |
242 | { |
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes, since
	 * ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
249 | nvme_do_delete_ctrl(ctrl); |
250 | nvme_put_ctrl(ctrl); |
251 | } |
252 | |
253 | static blk_status_t nvme_error_status(u16 status) |
254 | { |
255 | switch (status & 0x7ff) { |
256 | case NVME_SC_SUCCESS: |
257 | return BLK_STS_OK; |
258 | case NVME_SC_CAP_EXCEEDED: |
259 | return BLK_STS_NOSPC; |
260 | case NVME_SC_LBA_RANGE: |
261 | case NVME_SC_CMD_INTERRUPTED: |
262 | case NVME_SC_NS_NOT_READY: |
263 | return BLK_STS_TARGET; |
264 | case NVME_SC_BAD_ATTRIBUTES: |
265 | case NVME_SC_ONCS_NOT_SUPPORTED: |
266 | case NVME_SC_INVALID_OPCODE: |
267 | case NVME_SC_INVALID_FIELD: |
268 | case NVME_SC_INVALID_NS: |
269 | return BLK_STS_NOTSUPP; |
270 | case NVME_SC_WRITE_FAULT: |
271 | case NVME_SC_READ_ERROR: |
272 | case NVME_SC_UNWRITTEN_BLOCK: |
273 | case NVME_SC_ACCESS_DENIED: |
274 | case NVME_SC_READ_ONLY: |
275 | case NVME_SC_COMPARE_FAILED: |
276 | return BLK_STS_MEDIUM; |
277 | case NVME_SC_GUARD_CHECK: |
278 | case NVME_SC_APPTAG_CHECK: |
279 | case NVME_SC_REFTAG_CHECK: |
280 | case NVME_SC_INVALID_PI: |
281 | return BLK_STS_PROTECTION; |
282 | case NVME_SC_RESERVATION_CONFLICT: |
283 | return BLK_STS_RESV_CONFLICT; |
284 | case NVME_SC_HOST_PATH_ERROR: |
285 | return BLK_STS_TRANSPORT; |
286 | case NVME_SC_ZONE_TOO_MANY_ACTIVE: |
287 | return BLK_STS_ZONE_ACTIVE_RESOURCE; |
288 | case NVME_SC_ZONE_TOO_MANY_OPEN: |
289 | return BLK_STS_ZONE_OPEN_RESOURCE; |
290 | default: |
291 | return BLK_STS_IOERR; |
292 | } |
293 | } |
294 | |
295 | static void nvme_retry_req(struct request *req) |
296 | { |
297 | unsigned long delay = 0; |
298 | u16 crd; |
299 | |
300 | /* The mask and shift result must be <= 3 */ |
301 | crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; |
302 | if (crd) |
303 | delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; |
304 | |
305 | nvme_req(req)->retries++; |
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
308 | } |
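
/*
 * Worked example for the CRD handling above (illustrative): a completion
 * status with both Command Retry Delay bits set gives crd == 3, so the delay
 * is taken from ctrl->crdt[2].  The controller reports CRDT values in units
 * of 100 milliseconds, so a crdt[2] of 5 requeues the command after 500 ms.
 */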
309 | |
310 | static void nvme_log_error(struct request *req) |
311 | { |
312 | struct nvme_ns *ns = req->q->queuedata; |
313 | struct nvme_request *nr = nvme_req(req); |
314 | |
315 | if (ns) { |
316 | pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n" , |
317 | ns->disk ? ns->disk->disk_name : "?" , |
318 | nvme_get_opcode_str(nr->cmd->common.opcode), |
319 | nr->cmd->common.opcode, |
320 | (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)), |
321 | (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift, |
322 | nvme_get_error_status_str(nr->status), |
323 | nr->status >> 8 & 7, /* Status Code Type */ |
324 | nr->status & 0xff, /* Status Code */ |
325 | nr->status & NVME_SC_MORE ? "MORE " : "" , |
326 | nr->status & NVME_SC_DNR ? "DNR " : "" ); |
327 | return; |
328 | } |
329 | |
330 | pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n" , |
331 | dev_name(nr->ctrl->device), |
332 | nvme_get_admin_opcode_str(nr->cmd->common.opcode), |
333 | nr->cmd->common.opcode, |
334 | nvme_get_error_status_str(nr->status), |
335 | nr->status >> 8 & 7, /* Status Code Type */ |
336 | nr->status & 0xff, /* Status Code */ |
337 | nr->status & NVME_SC_MORE ? "MORE " : "" , |
338 | nr->status & NVME_SC_DNR ? "DNR " : "" ); |
339 | } |
340 | |
341 | enum nvme_disposition { |
342 | COMPLETE, |
343 | RETRY, |
344 | FAILOVER, |
345 | AUTHENTICATE, |
346 | }; |
347 | |
348 | static inline enum nvme_disposition nvme_decide_disposition(struct request *req) |
349 | { |
350 | if (likely(nvme_req(req)->status == 0)) |
351 | return COMPLETE; |
352 | |
353 | if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) |
354 | return AUTHENTICATE; |
355 | |
356 | if (blk_noretry_request(req) || |
357 | (nvme_req(req)->status & NVME_SC_DNR) || |
358 | nvme_req(req)->retries >= nvme_max_retries) |
359 | return COMPLETE; |
360 | |
361 | if (req->cmd_flags & REQ_NVME_MPATH) { |
		if (nvme_is_path_error(nvme_req(req)->status) ||
363 | blk_queue_dying(req->q)) |
364 | return FAILOVER; |
365 | } else { |
366 | if (blk_queue_dying(req->q)) |
367 | return COMPLETE; |
368 | } |
369 | |
370 | return RETRY; |
371 | } |
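
/*
 * Summary of the disposition logic above, for reference:
 *
 *	status == 0					-> COMPLETE
 *	NVME_SC_AUTH_REQUIRED				-> AUTHENTICATE
 *	noretry, DNR set, or retries exhausted		-> COMPLETE
 *	path error or dying queue (multipath request)	-> FAILOVER
 *	dying queue (non-multipath request)		-> COMPLETE
 *	anything else					-> RETRY
 */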
372 | |
373 | static inline void nvme_end_req_zoned(struct request *req) |
374 | { |
375 | if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && |
376 | req_op(req) == REQ_OP_ZONE_APPEND) |
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
378 | le64_to_cpu(nvme_req(req)->result.u64)); |
379 | } |
380 | |
381 | static inline void nvme_end_req(struct request *req) |
382 | { |
	blk_status_t status = nvme_error_status(nvme_req(req)->status);
384 | |
385 | if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) |
386 | nvme_log_error(req); |
387 | nvme_end_req_zoned(req); |
388 | nvme_trace_bio_complete(req); |
389 | if (req->cmd_flags & REQ_NVME_MPATH) |
		nvme_mpath_end_request(req);
	blk_mq_end_request(req, status);
392 | } |
393 | |
394 | void nvme_complete_rq(struct request *req) |
395 | { |
396 | struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; |
397 | |
398 | trace_nvme_complete_rq(req); |
399 | nvme_cleanup_cmd(req); |
400 | |
401 | /* |
402 | * Completions of long-running commands should not be able to |
403 | * defer sending of periodic keep alives, since the controller |
404 | * may have completed processing such commands a long time ago |
405 | * (arbitrarily close to command submission time). |
406 | * req->deadline - req->timeout is the command submission time |
407 | * in jiffies. |
408 | */ |
409 | if (ctrl->kas && |
410 | req->deadline - req->timeout >= ctrl->ka_last_check_time) |
411 | ctrl->comp_seen = true; |
412 | |
413 | switch (nvme_decide_disposition(req)) { |
414 | case COMPLETE: |
415 | nvme_end_req(req); |
416 | return; |
417 | case RETRY: |
418 | nvme_retry_req(req); |
419 | return; |
420 | case FAILOVER: |
421 | nvme_failover_req(req); |
422 | return; |
423 | case AUTHENTICATE: |
424 | #ifdef CONFIG_NVME_HOST_AUTH |
		queue_work(nvme_wq, &ctrl->dhchap_auth_work);
426 | nvme_retry_req(req); |
427 | #else |
428 | nvme_end_req(req); |
429 | #endif |
430 | return; |
431 | } |
432 | } |
433 | EXPORT_SYMBOL_GPL(nvme_complete_rq); |
434 | |
435 | void nvme_complete_batch_req(struct request *req) |
436 | { |
437 | trace_nvme_complete_rq(req); |
438 | nvme_cleanup_cmd(req); |
439 | nvme_end_req_zoned(req); |
440 | } |
441 | EXPORT_SYMBOL_GPL(nvme_complete_batch_req); |
442 | |
443 | /* |
444 | * Called to unwind from ->queue_rq on a failed command submission so that the |
445 | * multipathing code gets called to potentially failover to another path. |
446 | * The caller needs to unwind all transport specific resource allocations and |
 * must propagate the return value.
448 | */ |
449 | blk_status_t nvme_host_path_error(struct request *req) |
450 | { |
451 | nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; |
	blk_mq_set_request_complete(req);
453 | nvme_complete_rq(req); |
454 | return BLK_STS_OK; |
455 | } |
456 | EXPORT_SYMBOL_GPL(nvme_host_path_error); |
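
/*
 * Illustrative use in a transport's ->queue_rq (a sketch only; the example_*
 * helpers are hypothetical): after a failed submission the transport unwinds
 * its own resource allocations and then reports a host path error so the
 * multipath code can fail over to another path.
 *
 *	if (example_map_data(dev, rq))
 *		goto out_unmap;
 *	...
 * out_unmap:
 *	example_unmap_data(dev, rq);
 *	return nvme_host_path_error(rq);
 */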
457 | |
458 | bool nvme_cancel_request(struct request *req, void *data) |
459 | { |
460 | dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, |
461 | "Cancelling I/O %d" , req->tag); |
462 | |
463 | /* don't abort one completed or idle request */ |
	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
465 | return true; |
466 | |
467 | nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; |
468 | nvme_req(req)->flags |= NVME_REQ_CANCELLED; |
	blk_mq_complete_request(req);
470 | return true; |
471 | } |
472 | EXPORT_SYMBOL_GPL(nvme_cancel_request); |
473 | |
474 | void nvme_cancel_tagset(struct nvme_ctrl *ctrl) |
475 | { |
476 | if (ctrl->tagset) { |
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
480 | } |
481 | } |
482 | EXPORT_SYMBOL_GPL(nvme_cancel_tagset); |
483 | |
484 | void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) |
485 | { |
486 | if (ctrl->admin_tagset) { |
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
490 | } |
491 | } |
492 | EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); |
493 | |
494 | bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, |
495 | enum nvme_ctrl_state new_state) |
496 | { |
497 | enum nvme_ctrl_state old_state; |
498 | unsigned long flags; |
499 | bool changed = false; |
500 | |
501 | spin_lock_irqsave(&ctrl->lock, flags); |
502 | |
503 | old_state = ctrl->state; |
504 | switch (new_state) { |
505 | case NVME_CTRL_LIVE: |
506 | switch (old_state) { |
507 | case NVME_CTRL_NEW: |
508 | case NVME_CTRL_RESETTING: |
509 | case NVME_CTRL_CONNECTING: |
510 | changed = true; |
511 | fallthrough; |
512 | default: |
513 | break; |
514 | } |
515 | break; |
516 | case NVME_CTRL_RESETTING: |
517 | switch (old_state) { |
518 | case NVME_CTRL_NEW: |
519 | case NVME_CTRL_LIVE: |
520 | changed = true; |
521 | fallthrough; |
522 | default: |
523 | break; |
524 | } |
525 | break; |
526 | case NVME_CTRL_CONNECTING: |
527 | switch (old_state) { |
528 | case NVME_CTRL_NEW: |
529 | case NVME_CTRL_RESETTING: |
530 | changed = true; |
531 | fallthrough; |
532 | default: |
533 | break; |
534 | } |
535 | break; |
536 | case NVME_CTRL_DELETING: |
537 | switch (old_state) { |
538 | case NVME_CTRL_LIVE: |
539 | case NVME_CTRL_RESETTING: |
540 | case NVME_CTRL_CONNECTING: |
541 | changed = true; |
542 | fallthrough; |
543 | default: |
544 | break; |
545 | } |
546 | break; |
547 | case NVME_CTRL_DELETING_NOIO: |
548 | switch (old_state) { |
549 | case NVME_CTRL_DELETING: |
550 | case NVME_CTRL_DEAD: |
551 | changed = true; |
552 | fallthrough; |
553 | default: |
554 | break; |
555 | } |
556 | break; |
557 | case NVME_CTRL_DEAD: |
558 | switch (old_state) { |
559 | case NVME_CTRL_DELETING: |
560 | changed = true; |
561 | fallthrough; |
562 | default: |
563 | break; |
564 | } |
565 | break; |
566 | default: |
567 | break; |
568 | } |
569 | |
570 | if (changed) { |
571 | ctrl->state = new_state; |
572 | wake_up_all(&ctrl->state_wq); |
573 | } |
574 | |
	spin_unlock_irqrestore(&ctrl->lock, flags);
576 | if (!changed) |
577 | return false; |
578 | |
579 | if (ctrl->state == NVME_CTRL_LIVE) { |
580 | if (old_state == NVME_CTRL_CONNECTING) |
581 | nvme_stop_failfast_work(ctrl); |
582 | nvme_kick_requeue_lists(ctrl); |
583 | } else if (ctrl->state == NVME_CTRL_CONNECTING && |
584 | old_state == NVME_CTRL_RESETTING) { |
585 | nvme_start_failfast_work(ctrl); |
586 | } |
587 | return changed; |
588 | } |
589 | EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); |
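
/*
 * Allowed controller state transitions implemented above, for reference:
 *
 *	NEW        -> LIVE, RESETTING, CONNECTING
 *	LIVE       -> RESETTING, DELETING
 *	RESETTING  -> LIVE, CONNECTING, DELETING
 *	CONNECTING -> LIVE, DELETING
 *	DELETING   -> DELETING_NOIO, DEAD
 *	DEAD       -> DELETING_NOIO
 *
 * Any other transition is rejected and nvme_change_ctrl_state() returns
 * false without touching ctrl->state.
 */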
590 | |
591 | /* |
592 | * Returns true for sink states that can't ever transition back to live. |
593 | */ |
594 | static bool nvme_state_terminal(struct nvme_ctrl *ctrl) |
595 | { |
596 | switch (ctrl->state) { |
597 | case NVME_CTRL_NEW: |
598 | case NVME_CTRL_LIVE: |
599 | case NVME_CTRL_RESETTING: |
600 | case NVME_CTRL_CONNECTING: |
601 | return false; |
602 | case NVME_CTRL_DELETING: |
603 | case NVME_CTRL_DELETING_NOIO: |
604 | case NVME_CTRL_DEAD: |
605 | return true; |
606 | default: |
607 | WARN_ONCE(1, "Unhandled ctrl state:%d" , ctrl->state); |
608 | return true; |
609 | } |
610 | } |
611 | |
612 | /* |
613 | * Waits for the controller state to be resetting, or returns false if it is |
614 | * not possible to ever transition to that state. |
615 | */ |
616 | bool nvme_wait_reset(struct nvme_ctrl *ctrl) |
617 | { |
618 | wait_event(ctrl->state_wq, |
619 | nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || |
620 | nvme_state_terminal(ctrl)); |
621 | return ctrl->state == NVME_CTRL_RESETTING; |
622 | } |
623 | EXPORT_SYMBOL_GPL(nvme_wait_reset); |
624 | |
625 | static void nvme_free_ns_head(struct kref *ref) |
626 | { |
627 | struct nvme_ns_head *head = |
628 | container_of(ref, struct nvme_ns_head, ref); |
629 | |
630 | nvme_mpath_remove_disk(head); |
	ida_free(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
635 | } |
636 | |
637 | bool nvme_tryget_ns_head(struct nvme_ns_head *head) |
638 | { |
	return kref_get_unless_zero(&head->ref);
640 | } |
641 | |
642 | void nvme_put_ns_head(struct nvme_ns_head *head) |
643 | { |
	kref_put(&head->ref, nvme_free_ns_head);
645 | } |
646 | |
647 | static void nvme_free_ns(struct kref *kref) |
648 | { |
649 | struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); |
650 | |
	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
655 | } |
656 | |
657 | static inline bool nvme_get_ns(struct nvme_ns *ns) |
658 | { |
	return kref_get_unless_zero(&ns->kref);
660 | } |
661 | |
662 | void nvme_put_ns(struct nvme_ns *ns) |
663 | { |
	kref_put(&ns->kref, nvme_free_ns);
665 | } |
666 | EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU); |
667 | |
668 | static inline void nvme_clear_nvme_request(struct request *req) |
669 | { |
670 | nvme_req(req)->status = 0; |
671 | nvme_req(req)->retries = 0; |
672 | nvme_req(req)->flags = 0; |
673 | req->rq_flags |= RQF_DONTPREP; |
674 | } |
675 | |
676 | /* initialize a passthrough request */ |
677 | void nvme_init_request(struct request *req, struct nvme_command *cmd) |
678 | { |
679 | if (req->q->queuedata) |
680 | req->timeout = NVME_IO_TIMEOUT; |
681 | else /* no queuedata implies admin queue */ |
682 | req->timeout = NVME_ADMIN_TIMEOUT; |
683 | |
684 | /* passthru commands should let the driver set the SGL flags */ |
685 | cmd->common.flags &= ~NVME_CMD_SGL_ALL; |
686 | |
687 | req->cmd_flags |= REQ_FAILFAST_DRIVER; |
688 | if (req->mq_hctx->type == HCTX_TYPE_POLL) |
689 | req->cmd_flags |= REQ_POLLED; |
690 | nvme_clear_nvme_request(req); |
691 | req->rq_flags |= RQF_QUIET; |
692 | memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); |
693 | } |
694 | EXPORT_SYMBOL_GPL(nvme_init_request); |
695 | |
696 | /* |
 * For commands we're not in a state to send to the device, the default action
 * is to busy them and retry after the controller state is recovered. However,
 * if the controller is deleting, or if the request is marked for failfast or
 * NVMe multipath, it is failed immediately.
701 | * |
702 | * Note: commands used to initialize the controller will be marked for failfast. |
703 | * Note: nvme cli/ioctl commands are marked for failfast. |
704 | */ |
705 | blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, |
706 | struct request *rq) |
707 | { |
708 | if (ctrl->state != NVME_CTRL_DELETING_NOIO && |
709 | ctrl->state != NVME_CTRL_DELETING && |
710 | ctrl->state != NVME_CTRL_DEAD && |
711 | !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && |
712 | !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) |
713 | return BLK_STS_RESOURCE; |
714 | return nvme_host_path_error(rq); |
715 | } |
716 | EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); |
717 | |
718 | bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, |
719 | bool queue_live) |
720 | { |
	struct nvme_request *req = nvme_req(rq);
722 | |
723 | /* |
	 * Currently we have a problem sending passthru commands on the
	 * admin_q if the controller is not LIVE, because we can't make sure
	 * they are issued after the admin connect, controller enable and/or
	 * other commands in the initialization sequence. Until the controller
	 * is LIVE, fail with BLK_STS_RESOURCE so that they get rescheduled.
730 | */ |
731 | if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) |
732 | return false; |
733 | |
734 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
735 | /* |
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
743 | (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || |
744 | req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send || |
745 | req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive)) |
746 | return true; |
747 | break; |
748 | default: |
749 | break; |
750 | case NVME_CTRL_DEAD: |
751 | return false; |
752 | } |
753 | } |
754 | |
755 | return queue_live; |
756 | } |
757 | EXPORT_SYMBOL_GPL(__nvme_check_ready); |
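
/*
 * Typical fabrics ->queue_rq pattern built on the two helpers above
 * (illustrative sketch; EXAMPLE_Q_LIVE stands in for whatever per-queue
 * liveness flag the transport maintains):
 *
 *	bool queue_ready = test_bit(EXAMPLE_Q_LIVE, &queue->flags);
 *
 *	if (!nvme_check_ready(ctrl, rq, queue_ready))
 *		return nvme_fail_nonready_command(ctrl, rq);
 */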
758 | |
759 | static inline void nvme_setup_flush(struct nvme_ns *ns, |
760 | struct nvme_command *cmnd) |
761 | { |
762 | memset(cmnd, 0, sizeof(*cmnd)); |
763 | cmnd->common.opcode = nvme_cmd_flush; |
764 | cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); |
765 | } |
766 | |
767 | static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, |
768 | struct nvme_command *cmnd) |
769 | { |
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
771 | struct nvme_dsm_range *range; |
772 | struct bio *bio; |
773 | |
774 | /* |
775 | * Some devices do not consider the DSM 'Number of Ranges' field when |
776 | * determining how much data to DMA. Always allocate memory for maximum |
777 | * number of segments to prevent device reading beyond end of buffer. |
778 | */ |
779 | static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; |
780 | |
	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
782 | if (!range) { |
783 | /* |
784 | * If we fail allocation our range, fallback to the controller |
785 | * discard page. If that's also busy, it's safe to return |
786 | * busy, as we know we can make progress once that's freed. |
787 | */ |
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
789 | return BLK_STS_RESOURCE; |
790 | |
791 | range = page_address(ns->ctrl->discard_page); |
792 | } |
793 | |
	if (queue_max_discard_segments(req->q) == 1) {
		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
797 | |
798 | range[0].cattr = cpu_to_le32(0); |
799 | range[0].nlb = cpu_to_le32(nlb); |
800 | range[0].slba = cpu_to_le64(slba); |
801 | n = 1; |
802 | } else { |
803 | __rq_for_each_bio(bio, req) { |
			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
805 | u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; |
806 | |
807 | if (n < segments) { |
808 | range[n].cattr = cpu_to_le32(0); |
809 | range[n].nlb = cpu_to_le32(nlb); |
810 | range[n].slba = cpu_to_le64(slba); |
811 | } |
812 | n++; |
813 | } |
814 | } |
815 | |
816 | if (WARN_ON_ONCE(n != segments)) { |
817 | if (virt_to_page(range) == ns->ctrl->discard_page) |
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
821 | return BLK_STS_IOERR; |
822 | } |
823 | |
824 | memset(cmnd, 0, sizeof(*cmnd)); |
825 | cmnd->dsm.opcode = nvme_cmd_dsm; |
826 | cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); |
827 | cmnd->dsm.nr = cpu_to_le32(segments - 1); |
828 | cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); |
829 | |
	bvec_set_virt(&req->special_vec, range, alloc_size);
831 | req->rq_flags |= RQF_SPECIAL_PAYLOAD; |
832 | |
833 | return BLK_STS_OK; |
834 | } |
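
/*
 * Worked example for the DSM encoding above (illustrative): with 4096-byte
 * logical blocks (ns->lba_shift == 12), a single-range discard starting at
 * 512-byte sector 2048 and spanning 256 sectors becomes
 *
 *	slba = 2048 >> (12 - 9) = 256
 *	nlb  =  256 >> (12 - 9) =  32
 *
 * and cmnd->dsm.nr is 0, since the Number of Ranges field is 0's based.
 */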
835 | |
836 | static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, |
837 | struct request *req) |
838 | { |
839 | u32 upper, lower; |
840 | u64 ref48; |
841 | |
842 | /* both rw and write zeroes share the same reftag format */ |
843 | switch (ns->guard_type) { |
844 | case NVME_NVM_NS_16B_GUARD: |
845 | cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); |
846 | break; |
847 | case NVME_NVM_NS_64B_GUARD: |
		ref48 = ext_pi_ref_tag(req);
849 | lower = lower_32_bits(ref48); |
850 | upper = upper_32_bits(ref48); |
851 | |
852 | cmnd->rw.reftag = cpu_to_le32(lower); |
853 | cmnd->rw.cdw3 = cpu_to_le32(upper); |
854 | break; |
855 | default: |
856 | break; |
857 | } |
858 | } |
859 | |
860 | static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, |
861 | struct request *req, struct nvme_command *cmnd) |
862 | { |
863 | memset(cmnd, 0, sizeof(*cmnd)); |
864 | |
865 | if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) |
866 | return nvme_setup_discard(ns, req, cmnd); |
867 | |
868 | cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; |
869 | cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); |
870 | cmnd->write_zeroes.slba = |
871 | cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); |
872 | cmnd->write_zeroes.length = |
873 | cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); |
874 | |
875 | if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC)) |
876 | cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC); |
877 | |
878 | if (nvme_ns_has_pi(ns)) { |
879 | cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); |
880 | |
881 | switch (ns->pi_type) { |
882 | case NVME_NS_DPS_PI_TYPE1: |
883 | case NVME_NS_DPS_PI_TYPE2: |
884 | nvme_set_ref_tag(ns, cmnd, req); |
885 | break; |
886 | } |
887 | } |
888 | |
889 | return BLK_STS_OK; |
890 | } |
891 | |
892 | static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, |
893 | struct request *req, struct nvme_command *cmnd, |
894 | enum nvme_opcode op) |
895 | { |
896 | u16 control = 0; |
897 | u32 dsmgmt = 0; |
898 | |
899 | if (req->cmd_flags & REQ_FUA) |
900 | control |= NVME_RW_FUA; |
901 | if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) |
902 | control |= NVME_RW_LR; |
903 | |
904 | if (req->cmd_flags & REQ_RAHEAD) |
905 | dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; |
906 | |
907 | cmnd->rw.opcode = op; |
908 | cmnd->rw.flags = 0; |
909 | cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); |
910 | cmnd->rw.cdw2 = 0; |
911 | cmnd->rw.cdw3 = 0; |
912 | cmnd->rw.metadata = 0; |
913 | cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); |
914 | cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); |
915 | cmnd->rw.reftag = 0; |
916 | cmnd->rw.apptag = 0; |
917 | cmnd->rw.appmask = 0; |
918 | |
919 | if (ns->ms) { |
920 | /* |
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
927 | if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) |
928 | return BLK_STS_NOTSUPP; |
929 | control |= NVME_RW_PRINFO_PRACT; |
930 | } |
931 | |
932 | switch (ns->pi_type) { |
933 | case NVME_NS_DPS_PI_TYPE3: |
934 | control |= NVME_RW_PRINFO_PRCHK_GUARD; |
935 | break; |
936 | case NVME_NS_DPS_PI_TYPE1: |
937 | case NVME_NS_DPS_PI_TYPE2: |
938 | control |= NVME_RW_PRINFO_PRCHK_GUARD | |
939 | NVME_RW_PRINFO_PRCHK_REF; |
940 | if (op == nvme_cmd_zone_append) |
941 | control |= NVME_RW_APPEND_PIREMAP; |
942 | nvme_set_ref_tag(ns, cmnd, req); |
943 | break; |
944 | } |
945 | } |
946 | |
947 | cmnd->rw.control = cpu_to_le16(control); |
948 | cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); |
949 | return 0; |
950 | } |
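
/*
 * Worked example for the slba/length encoding above (illustrative): a read
 * of 16384 bytes at 512-byte sector 2048 on a namespace with 4096-byte
 * logical blocks (ns->lba_shift == 12) yields
 *
 *	slba   = 2048 >> (12 - 9)  = 256
 *	length = (16384 >> 12) - 1 = 3
 *
 * because the NVMe Number of Logical Blocks field is 0's based.
 */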
951 | |
952 | void nvme_cleanup_cmd(struct request *req) |
953 | { |
954 | if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { |
955 | struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; |
956 | |
957 | if (req->special_vec.bv_page == ctrl->discard_page) |
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(bvec_virt(&req->special_vec));
961 | } |
962 | } |
963 | EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); |
964 | |
965 | blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) |
966 | { |
967 | struct nvme_command *cmd = nvme_req(req)->cmd; |
968 | blk_status_t ret = BLK_STS_OK; |
969 | |
970 | if (!(req->rq_flags & RQF_DONTPREP)) |
971 | nvme_clear_nvme_request(req); |
972 | |
973 | switch (req_op(req)) { |
974 | case REQ_OP_DRV_IN: |
975 | case REQ_OP_DRV_OUT: |
976 | /* these are setup prior to execution in nvme_init_request() */ |
977 | break; |
978 | case REQ_OP_FLUSH: |
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
1008 | break; |
1009 | default: |
1010 | WARN_ON_ONCE(1); |
1011 | return BLK_STS_IOERR; |
1012 | } |
1013 | |
	cmd->common.command_id = nvme_cid(req);
1015 | trace_nvme_setup_cmd(req, cmd); |
1016 | return ret; |
1017 | } |
1018 | EXPORT_SYMBOL_GPL(nvme_setup_cmd); |
1019 | |
1020 | /* |
1021 | * Return values: |
1022 | * 0: success |
1023 | * >0: nvme controller's cqe status response |
1024 | * <0: kernel error in lieu of controller response |
1025 | */ |
1026 | int nvme_execute_rq(struct request *rq, bool at_head) |
1027 | { |
1028 | blk_status_t status; |
1029 | |
1030 | status = blk_execute_rq(rq, at_head); |
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
1035 | return blk_status_to_errno(status); |
1036 | } |
1037 | EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU); |
1038 | |
1039 | /* |
1040 | * Returns 0 on success. If the result is negative, it's a Linux error code; |
1041 | * if the result is positive, it's an NVM Express status code |
1042 | */ |
1043 | int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, |
1044 | union nvme_result *result, void *buffer, unsigned bufflen, |
1045 | int qid, int at_head, blk_mq_req_flags_t flags) |
1046 | { |
1047 | struct request *req; |
1048 | int ret; |
1049 | |
1050 | if (qid == NVME_QID_ANY) |
		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	else
		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
						qid - 1);

	if (IS_ERR(req))
		return PTR_ERR(req);
1058 | nvme_init_request(req, cmd); |
1059 | |
1060 | if (buffer && bufflen) { |
1061 | ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); |
1062 | if (ret) |
1063 | goto out; |
1064 | } |
1065 | |
1066 | ret = nvme_execute_rq(req, at_head); |
1067 | if (result && ret >= 0) |
1068 | *result = nvme_req(req)->result; |
1069 | out: |
	blk_mq_free_request(req);
1071 | return ret; |
1072 | } |
1073 | EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd); |
1074 | |
1075 | int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, |
1076 | void *buffer, unsigned bufflen) |
1077 | { |
1078 | return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, |
1079 | NVME_QID_ANY, 0, 0); |
1080 | } |
1081 | EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); |
1082 | |
1083 | u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) |
1084 | { |
1085 | u32 effects = 0; |
1086 | |
1087 | if (ns) { |
1088 | effects = le32_to_cpu(ns->head->effects->iocs[opcode]); |
1089 | if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) |
1090 | dev_warn_once(ctrl->device, |
1091 | "IO command:%02x has unusual effects:%08x\n" , |
1092 | opcode, effects); |
1093 | |
		/*
		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
		 * which would deadlock when done on an I/O command. Note that
		 * we already warn about an unusual effect above.
		 */
1099 | effects &= ~NVME_CMD_EFFECTS_CSE_MASK; |
1100 | } else { |
1101 | effects = le32_to_cpu(ctrl->effects->acs[opcode]); |
1102 | } |
1103 | |
1104 | return effects; |
1105 | } |
1106 | EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU); |
1107 | |
1108 | u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) |
1109 | { |
1110 | u32 effects = nvme_command_effects(ctrl, ns, opcode); |
1111 | |
1112 | /* |
1113 | * For simplicity, IO to all namespaces is quiesced even if the command |
1114 | * effects say only one namespace is affected. |
1115 | */ |
1116 | if (effects & NVME_CMD_EFFECTS_CSE_MASK) { |
1117 | mutex_lock(&ctrl->scan_lock); |
1118 | mutex_lock(&ctrl->subsys->lock); |
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
1121 | nvme_start_freeze(ctrl); |
1122 | nvme_wait_freeze(ctrl); |
1123 | } |
1124 | return effects; |
1125 | } |
1126 | EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU); |
1127 | |
1128 | void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, |
1129 | struct nvme_command *cmd, int status) |
1130 | { |
1131 | if (effects & NVME_CMD_EFFECTS_CSE_MASK) { |
1132 | nvme_unfreeze(ctrl); |
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		mutex_unlock(&ctrl->scan_lock);
1136 | } |
1137 | if (effects & NVME_CMD_EFFECTS_CCC) { |
		if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
				      &ctrl->flags)) {
1140 | dev_info(ctrl->device, |
1141 | "controller capabilities changed, reset may be required to take effect.\n" ); |
1142 | } |
1143 | } |
1144 | if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) { |
1145 | nvme_queue_scan(ctrl); |
		flush_work(&ctrl->scan_work);
1147 | } |
1148 | if (ns) |
1149 | return; |
1150 | |
1151 | switch (cmd->common.opcode) { |
1152 | case nvme_admin_set_features: |
1153 | switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) { |
1154 | case NVME_FEAT_KATO: |
1155 | /* |
1156 | * Keep alive commands interval on the host should be |
1157 | * updated when KATO is modified by Set Features |
1158 | * commands. |
1159 | */ |
1160 | if (!status) |
1161 | nvme_update_keep_alive(ctrl, cmd); |
1162 | break; |
1163 | default: |
1164 | break; |
1165 | } |
1166 | break; |
1167 | default: |
1168 | break; |
1169 | } |
1170 | } |
1171 | EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); |
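
/*
 * The two helpers above are used to bracket passthrough execution roughly
 * like this (illustrative sketch):
 *
 *	effects = nvme_passthru_start(ctrl, ns, cmd.common.opcode);
 *	ret = nvme_execute_rq(rq, false);
 *	if (effects)
 *		nvme_passthru_end(ctrl, ns, effects, &cmd, ret);
 *
 * so that commands with CSE effects run with all I/O frozen, and namespace
 * or capability changes are picked up once the command has completed.
 */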
1172 | |
1173 | /* |
1174 | * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: |
1175 | * |
1176 | * The host should send Keep Alive commands at half of the Keep Alive Timeout |
1177 | * accounting for transport roundtrip times [..]. |
1178 | */ |
1179 | static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl) |
1180 | { |
1181 | unsigned long delay = ctrl->kato * HZ / 2; |
1182 | |
1183 | /* |
1184 | * When using Traffic Based Keep Alive, we need to run |
1185 | * nvme_keep_alive_work at twice the normal frequency, as one |
1186 | * command completion can postpone sending a keep alive command |
1187 | * by up to twice the delay between runs. |
1188 | */ |
1189 | if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) |
1190 | delay /= 2; |
1191 | return delay; |
1192 | } |
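
/*
 * Example (illustrative): with a 15 second KATO the keep-alive work runs
 * every 7.5 seconds; if the controller advertises TBKAS in ctratt the period
 * drops to 3.75 seconds to compensate for completions postponing the keep
 * alive, as described above.
 */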
1193 | |
1194 | static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) |
1195 | { |
	queue_delayed_work(nvme_wq, &ctrl->ka_work,
			   nvme_keep_alive_work_period(ctrl));
1198 | } |
1199 | |
1200 | static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, |
1201 | blk_status_t status) |
1202 | { |
1203 | struct nvme_ctrl *ctrl = rq->end_io_data; |
1204 | unsigned long flags; |
1205 | bool startka = false; |
1206 | unsigned long rtt = jiffies - (rq->deadline - rq->timeout); |
1207 | unsigned long delay = nvme_keep_alive_work_period(ctrl); |
1208 | |
1209 | /* |
1210 | * Subtract off the keepalive RTT so nvme_keep_alive_work runs |
1211 | * at the desired frequency. |
1212 | */ |
1213 | if (rtt <= delay) { |
1214 | delay -= rtt; |
1215 | } else { |
1216 | dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n" , |
1217 | jiffies_to_msecs(rtt)); |
1218 | delay = 0; |
1219 | } |
1220 | |
1221 | blk_mq_free_request(rq); |
1222 | |
1223 | if (status) { |
1224 | dev_err(ctrl->device, |
1225 | "failed nvme_keep_alive_end_io error=%d\n" , |
1226 | status); |
1227 | return RQ_END_IO_NONE; |
1228 | } |
1229 | |
1230 | ctrl->ka_last_check_time = jiffies; |
1231 | ctrl->comp_seen = false; |
1232 | spin_lock_irqsave(&ctrl->lock, flags); |
1233 | if (ctrl->state == NVME_CTRL_LIVE || |
1234 | ctrl->state == NVME_CTRL_CONNECTING) |
1235 | startka = true; |
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
1239 | return RQ_END_IO_NONE; |
1240 | } |
1241 | |
1242 | static void nvme_keep_alive_work(struct work_struct *work) |
1243 | { |
1244 | struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), |
1245 | struct nvme_ctrl, ka_work); |
1246 | bool comp_seen = ctrl->comp_seen; |
1247 | struct request *rq; |
1248 | |
1249 | ctrl->ka_last_check_time = jiffies; |
1250 | |
1251 | if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { |
1252 | dev_dbg(ctrl->device, |
1253 | "reschedule traffic based keep-alive timer\n" ); |
1254 | ctrl->comp_seen = false; |
1255 | nvme_queue_keep_alive_work(ctrl); |
1256 | return; |
1257 | } |
1258 | |
	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
1262 | /* allocation failure, reset the controller */ |
1263 | dev_err(ctrl->device, "keep-alive failed: %ld\n" , PTR_ERR(rq)); |
1264 | nvme_reset_ctrl(ctrl); |
1265 | return; |
1266 | } |
1267 | nvme_init_request(rq, &ctrl->ka_cmd); |
1268 | |
1269 | rq->timeout = ctrl->kato * HZ; |
1270 | rq->end_io = nvme_keep_alive_end_io; |
1271 | rq->end_io_data = ctrl; |
	blk_execute_rq_nowait(rq, false);
1273 | } |
1274 | |
1275 | static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) |
1276 | { |
1277 | if (unlikely(ctrl->kato == 0)) |
1278 | return; |
1279 | |
1280 | nvme_queue_keep_alive_work(ctrl); |
1281 | } |
1282 | |
1283 | void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) |
1284 | { |
1285 | if (unlikely(ctrl->kato == 0)) |
1286 | return; |
1287 | |
	cancel_delayed_work_sync(&ctrl->ka_work);
1289 | } |
1290 | EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); |
1291 | |
1292 | static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, |
1293 | struct nvme_command *cmd) |
1294 | { |
1295 | unsigned int new_kato = |
1296 | DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000); |
1297 | |
1298 | dev_info(ctrl->device, |
1299 | "keep alive interval updated from %u ms to %u ms\n" , |
1300 | ctrl->kato * 1000 / 2, new_kato * 1000 / 2); |
1301 | |
1302 | nvme_stop_keep_alive(ctrl); |
1303 | ctrl->kato = new_kato; |
1304 | nvme_start_keep_alive(ctrl); |
1305 | } |
1306 | |
1307 | /* |
1308 | * In NVMe 1.0 the CNS field was just a binary controller or namespace |
1309 | * flag, thus sending any new CNS opcodes has a big chance of not working. |
1310 | * Qemu unfortunately had that bug after reporting a 1.1 version compliance |
1311 | * (but not for any later version). |
1312 | */ |
1313 | static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) |
1314 | { |
1315 | if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) |
1316 | return ctrl->vs < NVME_VS(1, 2, 0); |
1317 | return ctrl->vs < NVME_VS(1, 1, 0); |
1318 | } |
1319 | |
1320 | static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) |
1321 | { |
1322 | struct nvme_command c = { }; |
1323 | int error; |
1324 | |
1325 | /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ |
1326 | c.identify.opcode = nvme_admin_identify; |
1327 | c.identify.cns = NVME_ID_CNS_CTRL; |
1328 | |
	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
1330 | if (!*id) |
1331 | return -ENOMEM; |
1332 | |
1333 | error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, |
1334 | sizeof(struct nvme_id_ctrl)); |
1335 | if (error) |
		kfree(*id);
1337 | return error; |
1338 | } |
1339 | |
1340 | static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, |
1341 | struct nvme_ns_id_desc *cur, bool *csi_seen) |
1342 | { |
1343 | const char *warn_str = "ctrl returned bogus length:" ; |
1344 | void *data = cur; |
1345 | |
1346 | switch (cur->nidt) { |
1347 | case NVME_NIDT_EUI64: |
1348 | if (cur->nidl != NVME_NIDT_EUI64_LEN) { |
1349 | dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n" , |
1350 | warn_str, cur->nidl); |
1351 | return -1; |
1352 | } |
1353 | if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) |
1354 | return NVME_NIDT_EUI64_LEN; |
1355 | memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); |
1356 | return NVME_NIDT_EUI64_LEN; |
1357 | case NVME_NIDT_NGUID: |
1358 | if (cur->nidl != NVME_NIDT_NGUID_LEN) { |
1359 | dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n" , |
1360 | warn_str, cur->nidl); |
1361 | return -1; |
1362 | } |
1363 | if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) |
1364 | return NVME_NIDT_NGUID_LEN; |
1365 | memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); |
1366 | return NVME_NIDT_NGUID_LEN; |
1367 | case NVME_NIDT_UUID: |
1368 | if (cur->nidl != NVME_NIDT_UUID_LEN) { |
1369 | dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n" , |
1370 | warn_str, cur->nidl); |
1371 | return -1; |
1372 | } |
1373 | if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) |
1374 | return NVME_NIDT_UUID_LEN; |
		uuid_copy(&ids->uuid, data + sizeof(*cur));
1376 | return NVME_NIDT_UUID_LEN; |
1377 | case NVME_NIDT_CSI: |
1378 | if (cur->nidl != NVME_NIDT_CSI_LEN) { |
1379 | dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n" , |
1380 | warn_str, cur->nidl); |
1381 | return -1; |
1382 | } |
1383 | memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN); |
1384 | *csi_seen = true; |
1385 | return NVME_NIDT_CSI_LEN; |
1386 | default: |
1387 | /* Skip unknown types */ |
1388 | return cur->nidl; |
1389 | } |
1390 | } |
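
/*
 * Descriptor layout handled above (illustrative): each entry in the Identify
 * Namespace Identification Descriptor list is a 4-byte header (nidt, nidl,
 * two reserved bytes) followed by nidl bytes of payload, so a UUID entry
 * occupies 4 + 16 bytes.  The caller below advances by the returned payload
 * length plus sizeof(*cur).
 */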
1391 | |
1392 | static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, |
1393 | struct nvme_ns_info *info) |
1394 | { |
1395 | struct nvme_command c = { }; |
1396 | bool csi_seen = false; |
1397 | int status, pos, len; |
1398 | void *data; |
1399 | |
1400 | if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl)) |
1401 | return 0; |
1402 | if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) |
1403 | return 0; |
1404 | |
1405 | c.identify.opcode = nvme_admin_identify; |
1406 | c.identify.nsid = cpu_to_le32(info->nsid); |
1407 | c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; |
1408 | |
1409 | data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); |
1410 | if (!data) |
1411 | return -ENOMEM; |
1412 | |
1413 | status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, |
1414 | NVME_IDENTIFY_DATA_SIZE); |
1415 | if (status) { |
1416 | dev_warn(ctrl->device, |
1417 | "Identify Descriptors failed (nsid=%u, status=0x%x)\n" , |
1418 | info->nsid, status); |
1419 | goto free_data; |
1420 | } |
1421 | |
1422 | for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { |
1423 | struct nvme_ns_id_desc *cur = data + pos; |
1424 | |
1425 | if (cur->nidl == 0) |
1426 | break; |
1427 | |
		len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
1429 | if (len < 0) |
1430 | break; |
1431 | |
1432 | len += sizeof(*cur); |
1433 | } |
1434 | |
1435 | if (nvme_multi_css(ctrl) && !csi_seen) { |
1436 | dev_warn(ctrl->device, "Command set not reported for nsid:%d\n" , |
1437 | info->nsid); |
1438 | status = -EINVAL; |
1439 | } |
1440 | |
1441 | free_data: |
	kfree(data);
1443 | return status; |
1444 | } |
1445 | |
1446 | static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, |
1447 | struct nvme_id_ns **id) |
1448 | { |
1449 | struct nvme_command c = { }; |
1450 | int error; |
1451 | |
1452 | /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ |
1453 | c.identify.opcode = nvme_admin_identify; |
1454 | c.identify.nsid = cpu_to_le32(nsid); |
1455 | c.identify.cns = NVME_ID_CNS_NS; |
1456 | |
	*id = kmalloc(sizeof(**id), GFP_KERNEL);
1458 | if (!*id) |
1459 | return -ENOMEM; |
1460 | |
1461 | error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); |
1462 | if (error) { |
1463 | dev_warn(ctrl->device, "Identify namespace failed (%d)\n" , error); |
		kfree(*id);
1465 | } |
1466 | return error; |
1467 | } |
1468 | |
1469 | static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, |
1470 | struct nvme_ns_info *info) |
1471 | { |
1472 | struct nvme_ns_ids *ids = &info->ids; |
1473 | struct nvme_id_ns *id; |
1474 | int ret; |
1475 | |
	ret = nvme_identify_ns(ctrl, info->nsid, &id);
1477 | if (ret) |
1478 | return ret; |
1479 | |
1480 | if (id->ncap == 0) { |
1481 | /* namespace not allocated or attached */ |
1482 | info->is_removed = true; |
1483 | return -ENODEV; |
1484 | } |
1485 | |
1486 | info->anagrpid = id->anagrpid; |
1487 | info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; |
1488 | info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; |
1489 | info->is_ready = true; |
1490 | if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { |
1491 | dev_info(ctrl->device, |
1492 | "Ignoring bogus Namespace Identifiers\n" ); |
1493 | } else { |
1494 | if (ctrl->vs >= NVME_VS(1, 1, 0) && |
		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1496 | memcpy(ids->eui64, id->eui64, sizeof(ids->eui64)); |
1497 | if (ctrl->vs >= NVME_VS(1, 2, 0) && |
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1499 | memcpy(ids->nguid, id->nguid, sizeof(ids->nguid)); |
1500 | } |
	kfree(id);
1502 | return 0; |
1503 | } |
1504 | |
1505 | static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, |
1506 | struct nvme_ns_info *info) |
1507 | { |
1508 | struct nvme_id_ns_cs_indep *id; |
1509 | struct nvme_command c = { |
1510 | .identify.opcode = nvme_admin_identify, |
1511 | .identify.nsid = cpu_to_le32(info->nsid), |
1512 | .identify.cns = NVME_ID_CNS_NS_CS_INDEP, |
1513 | }; |
1514 | int ret; |
1515 | |
	id = kmalloc(sizeof(*id), GFP_KERNEL);
1517 | if (!id) |
1518 | return -ENOMEM; |
1519 | |
1520 | ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); |
1521 | if (!ret) { |
1522 | info->anagrpid = id->anagrpid; |
1523 | info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; |
1524 | info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; |
1525 | info->is_ready = id->nstat & NVME_NSTAT_NRDY; |
1526 | } |
	kfree(id);
1528 | return ret; |
1529 | } |
1530 | |
1531 | static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, |
1532 | unsigned int dword11, void *buffer, size_t buflen, u32 *result) |
1533 | { |
1534 | union nvme_result res = { 0 }; |
1535 | struct nvme_command c = { }; |
1536 | int ret; |
1537 | |
1538 | c.features.opcode = op; |
1539 | c.features.fid = cpu_to_le32(fid); |
1540 | c.features.dword11 = cpu_to_le32(dword11); |
1541 | |
1542 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, |
1543 | buffer, buflen, NVME_QID_ANY, 0, 0); |
1544 | if (ret >= 0 && result) |
1545 | *result = le32_to_cpu(res.u32); |
1546 | return ret; |
1547 | } |
1548 | |
1549 | int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, |
1550 | unsigned int dword11, void *buffer, size_t buflen, |
1551 | u32 *result) |
1552 | { |
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1554 | buflen, result); |
1555 | } |
1556 | EXPORT_SYMBOL_GPL(nvme_set_features); |
1557 | |
1558 | int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, |
1559 | unsigned int dword11, void *buffer, size_t buflen, |
1560 | u32 *result) |
1561 | { |
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1563 | buflen, result); |
1564 | } |
1565 | EXPORT_SYMBOL_GPL(nvme_get_features); |
1566 | |
1567 | int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) |
1568 | { |
1569 | u32 q_count = (*count - 1) | ((*count - 1) << 16); |
1570 | u32 result; |
1571 | int status, nr_io_queues; |
1572 | |
1573 | status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, |
1574 | &result); |
1575 | if (status < 0) |
1576 | return status; |
1577 | |
1578 | /* |
1579 | * Degraded controllers might return an error when setting the queue |
1580 | * count. We still want to be able to bring them online and offer |
1581 | * access to the admin queue, as that might be only way to fix them up. |
1582 | */ |
1583 | if (status > 0) { |
1584 | dev_err(ctrl->device, "Could not set queue count (%d)\n" , status); |
1585 | *count = 0; |
1586 | } else { |
1587 | nr_io_queues = min(result & 0xffff, result >> 16) + 1; |
1588 | *count = min(*count, nr_io_queues); |
1589 | } |
1590 | |
1591 | return 0; |
1592 | } |
1593 | EXPORT_SYMBOL_GPL(nvme_set_queue_count); |
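
/*
 * Worked example (illustrative): a caller asking for 8 I/O queues passes
 * *count == 8, so q_count is 7 | (7 << 16) (the requested queue counts are
 * 0's based).  If the controller answers with result 0x00030007 it allocated
 * 8 submission queues but only 4 completion queues, so nr_io_queues becomes
 * min(7, 3) + 1 == 4 and *count is clamped to 4.
 */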
1594 | |
1595 | #define NVME_AEN_SUPPORTED \ |
1596 | (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \ |
1597 | NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE) |
1598 | |
1599 | static void nvme_enable_aen(struct nvme_ctrl *ctrl) |
1600 | { |
1601 | u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; |
1602 | int status; |
1603 | |
1604 | if (!supported_aens) |
1605 | return; |
1606 | |
1607 | status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, |
1608 | NULL, 0, &result); |
1609 | if (status) |
1610 | dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n" , |
1611 | supported_aens); |
1612 | |
	queue_work(nvme_wq, &ctrl->async_event_work);
1614 | } |
1615 | |
1616 | static int nvme_ns_open(struct nvme_ns *ns) |
1617 | { |
1618 | |
1619 | /* should never be called due to GENHD_FL_HIDDEN */ |
1620 | if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head))) |
1621 | goto fail; |
1622 | if (!nvme_get_ns(ns)) |
1623 | goto fail; |
	if (!try_module_get(ns->ctrl->ops->module))
1625 | goto fail_put_ns; |
1626 | |
1627 | return 0; |
1628 | |
1629 | fail_put_ns: |
1630 | nvme_put_ns(ns); |
1631 | fail: |
1632 | return -ENXIO; |
1633 | } |
1634 | |
1635 | static void nvme_ns_release(struct nvme_ns *ns) |
1636 | { |
1637 | |
	module_put(ns->ctrl->ops->module);
1639 | nvme_put_ns(ns); |
1640 | } |
1641 | |
1642 | static int nvme_open(struct gendisk *disk, blk_mode_t mode) |
1643 | { |
	return nvme_ns_open(disk->private_data);
1645 | } |
1646 | |
1647 | static void nvme_release(struct gendisk *disk) |
1648 | { |
	nvme_ns_release(disk->private_data);
1650 | } |
1651 | |
1652 | int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
1653 | { |
1654 | /* some standard values */ |
1655 | geo->heads = 1 << 6; |
1656 | geo->sectors = 1 << 5; |
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1658 | return 0; |
1659 | } |
1660 | |
1661 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
1662 | static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, |
1663 | u32 max_integrity_segments) |
1664 | { |
1665 | struct blk_integrity integrity = { }; |
1666 | |
1667 | switch (ns->pi_type) { |
1668 | case NVME_NS_DPS_PI_TYPE3: |
1669 | switch (ns->guard_type) { |
1670 | case NVME_NVM_NS_16B_GUARD: |
1671 | integrity.profile = &t10_pi_type3_crc; |
1672 | integrity.tag_size = sizeof(u16) + sizeof(u32); |
1673 | integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; |
1674 | break; |
1675 | case NVME_NVM_NS_64B_GUARD: |
1676 | integrity.profile = &ext_pi_type3_crc64; |
1677 | integrity.tag_size = sizeof(u16) + 6; |
1678 | integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; |
1679 | break; |
1680 | default: |
1681 | integrity.profile = NULL; |
1682 | break; |
1683 | } |
1684 | break; |
1685 | case NVME_NS_DPS_PI_TYPE1: |
1686 | case NVME_NS_DPS_PI_TYPE2: |
1687 | switch (ns->guard_type) { |
1688 | case NVME_NVM_NS_16B_GUARD: |
1689 | integrity.profile = &t10_pi_type1_crc; |
1690 | integrity.tag_size = sizeof(u16); |
1691 | integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; |
1692 | break; |
1693 | case NVME_NVM_NS_64B_GUARD: |
1694 | integrity.profile = &ext_pi_type1_crc64; |
1695 | integrity.tag_size = sizeof(u16); |
1696 | integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; |
1697 | break; |
1698 | default: |
1699 | integrity.profile = NULL; |
1700 | break; |
1701 | } |
1702 | break; |
1703 | default: |
1704 | integrity.profile = NULL; |
1705 | break; |
1706 | } |
1707 | |
1708 | integrity.tuple_size = ns->ms; |
1709 | blk_integrity_register(disk, &integrity); |
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1711 | } |
1712 | #else |
1713 | static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, |
1714 | u32 max_integrity_segments) |
1715 | { |
1716 | } |
1717 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
1718 | |
1719 | static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) |
1720 | { |
1721 | struct nvme_ctrl *ctrl = ns->ctrl; |
1722 | struct request_queue *queue = disk->queue; |
	u32 size = queue_logical_block_size(queue);
1724 | |
1725 | if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX)) |
		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
1727 | |
1728 | if (ctrl->max_discard_sectors == 0) { |
		blk_queue_max_discard_sectors(queue, 0);
1730 | return; |
1731 | } |
1732 | |
1733 | BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < |
1734 | NVME_DSM_MAX_RANGES); |
1735 | |
1736 | queue->limits.discard_granularity = size; |
1737 | |
1738 | /* If discard is already enabled, don't reset queue limits */ |
1739 | if (queue->limits.max_discard_sectors) |
1740 | return; |
1741 | |
	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1747 | } |
1748 | |
1749 | static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) |
1750 | { |
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
1754 | a->csi == b->csi; |
1755 | } |
1756 | |
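/*
 * Determine the metadata size and protection information layout for the
 * namespace.  Controllers with extended LBA format support (ELBAS) are asked
 * for the NVM command set specific Identify Namespace data to find the guard
 * type; otherwise the classic 16-bit guard T10-PI tuple is assumed.
 */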
1757 | static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) |
1758 | { |
1759 | bool first = id->dps & NVME_NS_DPS_PI_FIRST; |
	unsigned lbaf = nvme_lbaf_index(id->flbas);
1761 | struct nvme_ctrl *ctrl = ns->ctrl; |
1762 | struct nvme_command c = { }; |
1763 | struct nvme_id_ns_nvm *nvm; |
1764 | int ret = 0; |
1765 | u32 elbaf; |
1766 | |
1767 | ns->pi_size = 0; |
1768 | ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); |
1769 | if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { |
1770 | ns->pi_size = sizeof(struct t10_pi_tuple); |
1771 | ns->guard_type = NVME_NVM_NS_16B_GUARD; |
1772 | goto set_pi; |
1773 | } |
1774 | |
	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
1776 | if (!nvm) |
1777 | return -ENOMEM; |
1778 | |
1779 | c.identify.opcode = nvme_admin_identify; |
1780 | c.identify.nsid = cpu_to_le32(ns->head->ns_id); |
1781 | c.identify.cns = NVME_ID_CNS_CS_NS; |
1782 | c.identify.csi = NVME_CSI_NVM; |
1783 | |
1784 | ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm)); |
1785 | if (ret) |
1786 | goto free_data; |
1787 | |
1788 | elbaf = le32_to_cpu(nvm->elbaf[lbaf]); |
1789 | |
1790 | /* no support for storage tag formats right now */ |
1791 | if (nvme_elbaf_sts(elbaf)) |
1792 | goto free_data; |
1793 | |
1794 | ns->guard_type = nvme_elbaf_guard_type(elbaf); |
1795 | switch (ns->guard_type) { |
1796 | case NVME_NVM_NS_64B_GUARD: |
1797 | ns->pi_size = sizeof(struct crc64_pi_tuple); |
1798 | break; |
1799 | case NVME_NVM_NS_16B_GUARD: |
1800 | ns->pi_size = sizeof(struct t10_pi_tuple); |
1801 | break; |
1802 | default: |
1803 | break; |
1804 | } |
1805 | |
1806 | free_data: |
	kfree(nvm);
1808 | set_pi: |
1809 | if (ns->pi_size && (first || ns->ms == ns->pi_size)) |
1810 | ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; |
1811 | else |
1812 | ns->pi_type = 0; |
1813 | |
1814 | return ret; |
1815 | } |
1816 | |
1817 | static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) |
1818 | { |
1819 | struct nvme_ctrl *ctrl = ns->ctrl; |
1820 | |
1821 | if (nvme_init_ms(ns, id)) |
1822 | return; |
1823 | |
1824 | ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); |
1825 | if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) |
1826 | return; |
1827 | |
1828 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
1829 | /* |
1830 | * The NVMe over Fabrics specification only supports metadata as |
1831 | * part of the extended data LBA. We rely on HCA/HBA support to |
1832 | * remap the separate metadata buffer from the block layer. |
1833 | */ |
1834 | if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) |
1835 | return; |
1836 | |
1837 | ns->features |= NVME_NS_EXT_LBAS; |
1838 | |
1839 | /* |
1840 | * The current fabrics transport drivers support namespace |
1841 | * metadata formats only if nvme_ns_has_pi() returns true. |
1842 | * Suppress support for all other formats so the namespace will |
1843 | * have a 0 capacity and not be usable through the block stack. |
1844 | * |
1845 | * Note, this check will need to be modified if any drivers |
1846 | * gain the ability to use other metadata formats. |
1847 | */ |
1848 | if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns)) |
1849 | ns->features |= NVME_NS_METADATA_SUPPORTED; |
1850 | } else { |
1851 | /* |
1852 | * For PCIe controllers, we can't easily remap the separate |
1853 | * metadata buffer from the block layer and thus require a |
1854 | * separate metadata buffer for block layer metadata/PI support. |
1855 | * We allow extended LBAs for the passthrough interface, though. |
1856 | */ |
1857 | if (id->flbas & NVME_NS_FLBAS_META_EXT) |
1858 | ns->features |= NVME_NS_EXT_LBAS; |
1859 | else |
1860 | ns->features |= NVME_NS_METADATA_SUPPORTED; |
1861 | } |
1862 | } |
1863 | |
1864 | static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, |
1865 | struct request_queue *q) |
1866 | { |
1867 | bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; |
1868 | |
1869 | if (ctrl->max_hw_sectors) { |
1870 | u32 max_segments = |
1871 | (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; |
1872 | |
1873 | max_segments = min_not_zero(max_segments, ctrl->max_segments); |
1874 | blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); |
1875 | blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); |
1876 | } |
1877 | blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); |
1878 | blk_queue_dma_alignment(q, 3); |
	blk_queue_write_cache(q, vwc, vwc);
1880 | } |
1881 | |
1882 | static void nvme_update_disk_info(struct gendisk *disk, |
1883 | struct nvme_ns *ns, struct nvme_id_ns *id) |
1884 | { |
1885 | sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); |
1886 | u32 bs = 1U << ns->lba_shift; |
1887 | u32 atomic_bs, phys_bs, io_opt = 0; |
1888 | |
1889 | /* |
1890 | * The block layer can't support LBA sizes larger than the page size |
1891 | * yet, so catch this early and don't allow block I/O. |
1892 | */ |
1893 | if (ns->lba_shift > PAGE_SHIFT) { |
1894 | capacity = 0; |
1895 | bs = (1 << 9); |
1896 | } |
1897 | |
1898 | blk_integrity_unregister(disk); |
1899 | |
1900 | atomic_bs = phys_bs = bs; |
1901 | if (id->nabo == 0) { |
1902 | /* |
1903 | * Bit 1 indicates whether NAWUPF is defined for this namespace |
1904 | * and whether it should be used instead of AWUPF. If NAWUPF == |
1905 | * 0 then AWUPF must be used instead. |
1906 | */ |
1907 | if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) |
1908 | atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; |
1909 | else |
1910 | atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; |
1911 | } |
1912 | |
1913 | if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { |
1914 | /* NPWG = Namespace Preferred Write Granularity */ |
1915 | phys_bs = bs * (1 + le16_to_cpu(id->npwg)); |
1916 | /* NOWS = Namespace Optimal Write Size */ |
1917 | io_opt = bs * (1 + le16_to_cpu(id->nows)); |
1918 | } |
1919 | |
1920 | blk_queue_logical_block_size(disk->queue, bs); |
1921 | /* |
1922 | * Linux filesystems assume writing a single physical block is |
1923 | * an atomic operation. Hence limit the physical block size to the |
1924 | * value of the Atomic Write Unit Power Fail parameter. |
1925 | */ |
1926 | blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); |
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);
1929 | |
1930 | /* |
1931 | * Register a metadata profile for PI, or the plain non-integrity NVMe |
1932 | * metadata masquerading as Type 0 if supported, otherwise reject block |
1933 | * I/O to namespaces with metadata except when the namespace supports |
1934 | * PI, as it can strip/insert in that case. |
1935 | */ |
1936 | if (ns->ms) { |
1937 | if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && |
1938 | (ns->features & NVME_NS_METADATA_SUPPORTED)) |
			nvme_init_integrity(disk, ns,
					    ns->ctrl->max_integrity_segments);
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

	set_capacity_and_notify(disk, capacity);

	nvme_config_discard(disk, ns);
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   ns->ctrl->max_zeroes_sectors);
1950 | } |
1951 | |
1952 | static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) |
1953 | { |
1954 | return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags); |
1955 | } |
1956 | |
1957 | static inline bool nvme_first_scan(struct gendisk *disk) |
1958 | { |
1959 | /* nvme_alloc_ns() scans the disk prior to adding it */ |
1960 | return !disk_live(disk); |
1961 | } |
1962 | |
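/*
 * Expose the controller stripe size (for NVME_QUIRK_STRIPE_SIZE devices) or
 * the namespace optimal I/O boundary (NOIOB) to the block layer as the chunk
 * size, so that I/O gets split rather than issued across that boundary.
 */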
1963 | static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) |
1964 | { |
1965 | struct nvme_ctrl *ctrl = ns->ctrl; |
1966 | u32 iob; |
1967 | |
1968 | if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && |
	    is_power_of_2(ctrl->max_hw_sectors))
1970 | iob = ctrl->max_hw_sectors; |
1971 | else |
1972 | iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); |
1973 | |
1974 | if (!iob) |
1975 | return; |
1976 | |
	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
1987 | ns->disk->disk_name); |
1988 | return; |
1989 | } |
1990 | |
1991 | blk_queue_chunk_sectors(ns->queue, iob); |
1992 | } |
1993 | |
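/*
 * Update a namespace that the driver cannot service through the block layer:
 * refresh the queue limits and read-only state, but keep the gendisk (and any
 * multipath head) hidden so only the passthrough character device is usable.
 */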
1994 | static int nvme_update_ns_info_generic(struct nvme_ns *ns, |
1995 | struct nvme_ns_info *info) |
1996 | { |
	blk_mq_freeze_queue(ns->disk->queue);
	nvme_set_queue_limits(ns->ctrl, ns->queue);
	set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
	blk_mq_unfreeze_queue(ns->disk->queue);

	if (nvme_ns_head_multipath(ns->head)) {
		blk_mq_freeze_queue(ns->head->disk->queue);
		set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
		nvme_mpath_revalidate_paths(ns);
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
		ns->head->disk->flags |= GENHD_FL_HIDDEN;
		blk_mq_unfreeze_queue(ns->head->disk->queue);
	}

	/* Hide the block-interface for these devices */
	ns->disk->flags |= GENHD_FL_HIDDEN;
	set_bit(NVME_NS_READY, &ns->flags);
2015 | |
2016 | return 0; |
2017 | } |
2018 | |
2019 | static int nvme_update_ns_info_block(struct nvme_ns *ns, |
2020 | struct nvme_ns_info *info) |
2021 | { |
2022 | struct nvme_id_ns *id; |
2023 | unsigned lbaf; |
2024 | int ret; |
2025 | |
	ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
	if (ret)
		return ret;

	blk_mq_freeze_queue(ns->disk->queue);
	lbaf = nvme_lbaf_index(id->flbas);
	ns->lba_shift = id->lbaf[lbaf].ds;
	nvme_set_queue_limits(ns->ctrl, ns->queue);

	nvme_configure_metadata(ns, id);
	nvme_set_chunk_sectors(ns, id);
	nvme_update_disk_info(ns->disk, ns, id);
2038 | |
2039 | if (ns->head->ids.csi == NVME_CSI_ZNS) { |
2040 | ret = nvme_update_zone_info(ns, lbaf); |
2041 | if (ret) { |
			blk_mq_unfreeze_queue(ns->disk->queue);
2043 | goto out; |
2044 | } |
2045 | } |
2046 | |
2047 | /* |
2048 | * Only set the DEAC bit if the device guarantees that reads from |
2049 | * deallocated data return zeroes. While the DEAC bit does not |
2050 | * require that, it must be a no-op if reads from deallocated data |
2051 | * do not return zeroes. |
2052 | */ |
2053 | if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) |
2054 | ns->features |= NVME_NS_DEAC; |
	set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
	set_bit(NVME_NS_READY, &ns->flags);
	blk_mq_unfreeze_queue(ns->disk->queue);

	if (blk_queue_is_zoned(ns->queue)) {
		ret = nvme_revalidate_zones(ns);
		if (ret && !nvme_first_scan(ns->disk))
2062 | goto out; |
2063 | } |
2064 | |
	if (nvme_ns_head_multipath(ns->head)) {
		blk_mq_freeze_queue(ns->head->disk->queue);
		nvme_update_disk_info(ns->head->disk, ns, id);
		set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
		nvme_mpath_revalidate_paths(ns);
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
		disk_update_readahead(ns->head->disk);
		blk_mq_unfreeze_queue(ns->head->disk->queue);
2074 | } |
2075 | |
2076 | ret = 0; |
2077 | out: |
2078 | /* |
	 * If probing fails due to an unsupported feature, hide the block device,
2080 | * but still allow other access. |
2081 | */ |
2082 | if (ret == -ENODEV) { |
2083 | ns->disk->flags |= GENHD_FL_HIDDEN; |
		set_bit(NVME_NS_READY, &ns->flags);
		ret = 0;
	}
	kfree(id);
2088 | return ret; |
2089 | } |
2090 | |
2091 | static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) |
2092 | { |
2093 | switch (info->ids.csi) { |
2094 | case NVME_CSI_ZNS: |
2095 | if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { |
2096 | dev_info(ns->ctrl->device, |
2097 | "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n" , |
2098 | info->nsid); |
2099 | return nvme_update_ns_info_generic(ns, info); |
2100 | } |
2101 | return nvme_update_ns_info_block(ns, info); |
2102 | case NVME_CSI_NVM: |
2103 | return nvme_update_ns_info_block(ns, info); |
2104 | default: |
2105 | dev_info(ns->ctrl->device, |
2106 | "block device for nsid %u not supported (csi %u)\n" , |
2107 | info->nsid, info->ids.csi); |
2108 | return nvme_update_ns_info_generic(ns, info); |
2109 | } |
2110 | } |
2111 | |
2112 | #ifdef CONFIG_BLK_SED_OPAL |
2113 | static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, |
2114 | bool send) |
2115 | { |
2116 | struct nvme_ctrl *ctrl = data; |
2117 | struct nvme_command cmd = { }; |
2118 | |
2119 | if (send) |
2120 | cmd.common.opcode = nvme_admin_security_send; |
2121 | else |
2122 | cmd.common.opcode = nvme_admin_security_recv; |
2123 | cmd.common.nsid = 0; |
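	/* Security Protocol (SECP) in cdw10 bits 31:24, SPSP in bits 23:8 */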
2124 | cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); |
2125 | cmd.common.cdw11 = cpu_to_le32(len); |
2126 | |
2127 | return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, |
2128 | NVME_QID_ANY, 1, 0); |
2129 | } |
2130 | |
2131 | static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) |
2132 | { |
2133 | if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) { |
2134 | if (!ctrl->opal_dev) |
			ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
		else if (was_suspended)
			opal_unlock_from_suspend(ctrl->opal_dev);
	} else {
		free_opal_dev(ctrl->opal_dev);
2140 | ctrl->opal_dev = NULL; |
2141 | } |
2142 | } |
2143 | #else |
2144 | static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) |
2145 | { |
2146 | } |
2147 | #endif /* CONFIG_BLK_SED_OPAL */ |
2148 | |
2149 | #ifdef CONFIG_BLK_DEV_ZONED |
2150 | static int nvme_report_zones(struct gendisk *disk, sector_t sector, |
2151 | unsigned int nr_zones, report_zones_cb cb, void *data) |
2152 | { |
	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
2154 | data); |
2155 | } |
2156 | #else |
2157 | #define nvme_report_zones NULL |
2158 | #endif /* CONFIG_BLK_DEV_ZONED */ |
2159 | |
2160 | const struct block_device_operations nvme_bdev_ops = { |
2161 | .owner = THIS_MODULE, |
2162 | .ioctl = nvme_ioctl, |
2163 | .compat_ioctl = blkdev_compat_ptr_ioctl, |
2164 | .open = nvme_open, |
2165 | .release = nvme_release, |
2166 | .getgeo = nvme_getgeo, |
2167 | .report_zones = nvme_report_zones, |
2168 | .pr_ops = &nvme_pr_ops, |
2169 | }; |
2170 | |
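/*
 * Poll CSTS until the bits selected by mask read back as val, or until the
 * timeout (in seconds) expires.  Reading all ones means the device is gone.
 */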
2171 | static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val, |
2172 | u32 timeout, const char *op) |
2173 | { |
2174 | unsigned long timeout_jiffies = jiffies + timeout * HZ; |
2175 | u32 csts; |
2176 | int ret; |
2177 | |
2178 | while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { |
2179 | if (csts == ~0) |
2180 | return -ENODEV; |
2181 | if ((csts & mask) == val) |
2182 | break; |
2183 | |
		usleep_range(1000, 2000);
2185 | if (fatal_signal_pending(current)) |
2186 | return -EINTR; |
2187 | if (time_after(jiffies, timeout_jiffies)) { |
2188 | dev_err(ctrl->device, |
2189 | "Device not ready; aborting %s, CSTS=0x%x\n" , |
2190 | op, csts); |
2191 | return -ENODEV; |
2192 | } |
2193 | } |
2194 | |
2195 | return ret; |
2196 | } |
2197 | |
2198 | int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown) |
2199 | { |
2200 | int ret; |
2201 | |
2202 | ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; |
2203 | if (shutdown) |
2204 | ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; |
2205 | else |
2206 | ctrl->ctrl_config &= ~NVME_CC_ENABLE; |
2207 | |
2208 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); |
2209 | if (ret) |
2210 | return ret; |
2211 | |
2212 | if (shutdown) { |
		return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
				       NVME_CSTS_SHST_CMPLT,
				       ctrl->shutdown_timeout, "shutdown");
	}
	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);
	return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0,
			       (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
2221 | } |
2222 | EXPORT_SYMBOL_GPL(nvme_disable_ctrl); |
2223 | |
2224 | int nvme_enable_ctrl(struct nvme_ctrl *ctrl) |
2225 | { |
2226 | unsigned dev_page_min; |
2227 | u32 timeout; |
2228 | int ret; |
2229 | |
2230 | ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); |
2231 | if (ret) { |
2232 | dev_err(ctrl->device, "Reading CAP failed (%d)\n" , ret); |
2233 | return ret; |
2234 | } |
2235 | dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; |
2236 | |
2237 | if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { |
2238 | dev_err(ctrl->device, |
2239 | "Minimum device page size %u too large for host (%u)\n" , |
2240 | 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); |
2241 | return -ENODEV; |
2242 | } |
2243 | |
2244 | if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) |
2245 | ctrl->ctrl_config = NVME_CC_CSS_CSI; |
2246 | else |
2247 | ctrl->ctrl_config = NVME_CC_CSS_NVM; |
2248 | |
2249 | if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) |
2250 | ctrl->ctrl_config |= NVME_CC_CRIME; |
2251 | |
2252 | ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; |
2253 | ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; |
2254 | ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; |
2255 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); |
2256 | if (ret) |
2257 | return ret; |
2258 | |
2259 | /* Flush write to device (required if transport is PCI) */ |
2260 | ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); |
2261 | if (ret) |
2262 | return ret; |
2263 | |
2264 | /* CAP value may change after initial CC write */ |
2265 | ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); |
2266 | if (ret) |
2267 | return ret; |
2268 | |
2269 | timeout = NVME_CAP_TIMEOUT(ctrl->cap); |
2270 | if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { |
2271 | u32 crto, ready_timeout; |
2272 | |
2273 | ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); |
2274 | if (ret) { |
2275 | dev_err(ctrl->device, "Reading CRTO failed (%d)\n" , |
2276 | ret); |
2277 | return ret; |
2278 | } |
2279 | |
2280 | /* |
2281 | * CRTO should always be greater or equal to CAP.TO, but some |
2282 | * devices are known to get this wrong. Use the larger of the |
2283 | * two values. |
2284 | */ |
2285 | if (ctrl->ctrl_config & NVME_CC_CRIME) |
2286 | ready_timeout = NVME_CRTO_CRIMT(crto); |
2287 | else |
2288 | ready_timeout = NVME_CRTO_CRWMT(crto); |
2289 | |
2290 | if (ready_timeout < timeout) |
2291 | dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n" , |
2292 | crto, ctrl->cap); |
2293 | else |
2294 | timeout = ready_timeout; |
2295 | } |
2296 | |
2297 | ctrl->ctrl_config |= NVME_CC_ENABLE; |
2298 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); |
2299 | if (ret) |
2300 | return ret; |
	return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY,
			       (timeout + 1) / 2, "initialisation");
2303 | } |
2304 | EXPORT_SYMBOL_GPL(nvme_enable_ctrl); |
2305 | |
2306 | static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) |
2307 | { |
2308 | __le64 ts; |
2309 | int ret; |
2310 | |
2311 | if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) |
2312 | return 0; |
2313 | |
2314 | ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); |
2315 | ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), |
2316 | NULL); |
2317 | if (ret) |
2318 | dev_warn_once(ctrl->device, |
2319 | "could not set timestamp (%d)\n" , ret); |
2320 | return ret; |
2321 | } |
2322 | |
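/*
 * Enable optional host behavior support features: Advanced Command Retry
 * Enable (ACRE) when the controller reports command retry delay times, and
 * LBA Format Extension Enable (LBAFEE) when extended LBA formats (ELBAS) are
 * advertised.
 */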
2323 | static int nvme_configure_host_options(struct nvme_ctrl *ctrl) |
2324 | { |
2325 | struct nvme_feat_host_behavior *host; |
2326 | u8 acre = 0, lbafee = 0; |
2327 | int ret; |
2328 | |
2329 | /* Don't bother enabling the feature if retry delay is not reported */ |
2330 | if (ctrl->crdt[0]) |
2331 | acre = NVME_ENABLE_ACRE; |
2332 | if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) |
2333 | lbafee = NVME_ENABLE_LBAFEE; |
2334 | |
2335 | if (!acre && !lbafee) |
2336 | return 0; |
2337 | |
	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return 0;

	host->acre = acre;
	host->lbafee = lbafee;
	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	kfree(host);
2347 | return ret; |
2348 | } |
2349 | |
2350 | /* |
2351 | * The function checks whether the given total (exlat + enlat) latency of |
2352 | * a power state allows the latter to be used as an APST transition target. |
2353 | * It does so by comparing the latency to the primary and secondary latency |
2354 | * tolerances defined by module params. If there's a match, the corresponding |
2355 | * timeout value is returned and the matching tolerance index (1 or 2) is |
2356 | * reported. |
2357 | */ |
2358 | static bool nvme_apst_get_transition_time(u64 total_latency, |
2359 | u64 *transition_time, unsigned *last_index) |
2360 | { |
2361 | if (total_latency <= apst_primary_latency_tol_us) { |
2362 | if (*last_index == 1) |
2363 | return false; |
2364 | *last_index = 1; |
2365 | *transition_time = apst_primary_timeout_ms; |
2366 | return true; |
2367 | } |
2368 | if (apst_secondary_timeout_ms && |
2369 | total_latency <= apst_secondary_latency_tol_us) { |
2370 | if (*last_index <= 2) |
2371 | return false; |
2372 | *last_index = 2; |
2373 | *transition_time = apst_secondary_timeout_ms; |
2374 | return true; |
2375 | } |
2376 | return false; |
2377 | } |
2378 | |
2379 | /* |
2380 | * APST (Autonomous Power State Transition) lets us program a table of power |
2381 | * state transitions that the controller will perform automatically. |
2382 | * |
2383 | * Depending on module params, one of the two supported techniques will be used: |
2384 | * |
2385 | * - If the parameters provide explicit timeouts and tolerances, they will be |
2386 | * used to build a table with up to 2 non-operational states to transition to. |
2387 | * The default parameter values were selected based on the values used by |
2388 | * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic |
2389 | * regeneration of the APST table in the event of switching between external |
2390 | * and battery power, the timeouts and tolerances reflect a compromise |
2391 | * between values used by Microsoft for AC and battery scenarios. |
2392 | * - If not, we'll configure the table with a simple heuristic: we are willing |
2393 | * to spend at most 2% of the time transitioning between power states. |
2394 | * Therefore, when running in any given state, we will enter the next |
2395 | * lower-power non-operational state after waiting 50 * (enlat + exlat) |
2396 | * microseconds, as long as that state's exit latency is under the requested |
2397 | * maximum latency. |
2398 | * |
2399 | * We will not autonomously enter any non-operational state for which the total |
2400 | * latency exceeds ps_max_latency_us. |
2401 | * |
2402 | * Users can set ps_max_latency_us to zero to turn off APST. |
2403 | */ |
2404 | static int nvme_configure_apst(struct nvme_ctrl *ctrl) |
2405 | { |
2406 | struct nvme_feat_auto_pst *table; |
2407 | unsigned apste = 0; |
2408 | u64 max_lat_us = 0; |
2409 | __le64 target = 0; |
2410 | int max_ps = -1; |
2411 | int state; |
2412 | int ret; |
2413 | unsigned last_lt_index = UINT_MAX; |
2414 | |
2415 | /* |
2416 | * If APST isn't supported or if we haven't been initialized yet, |
2417 | * then don't do anything. |
2418 | */ |
2419 | if (!ctrl->apsta) |
2420 | return 0; |
2421 | |
2422 | if (ctrl->npss > 31) { |
2423 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n" ); |
2424 | return 0; |
2425 | } |
2426 | |
	table = kzalloc(sizeof(*table), GFP_KERNEL);
2428 | if (!table) |
2429 | return 0; |
2430 | |
2431 | if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { |
2432 | /* Turn off APST. */ |
2433 | dev_dbg(ctrl->device, "APST disabled\n" ); |
2434 | goto done; |
2435 | } |
2436 | |
2437 | /* |
2438 | * Walk through all states from lowest- to highest-power. |
2439 | * According to the spec, lower-numbered states use more power. NPSS, |
2440 | * despite the name, is the index of the lowest-power state, not the |
2441 | * number of states. |
2442 | */ |
2443 | for (state = (int)ctrl->npss; state >= 0; state--) { |
2444 | u64 total_latency_us, exit_latency_us, transition_ms; |
2445 | |
2446 | if (target) |
2447 | table->entries[state] = target; |
2448 | |
2449 | /* |
2450 | * Don't allow transitions to the deepest state if it's quirked |
2451 | * off. |
2452 | */ |
2453 | if (state == ctrl->npss && |
2454 | (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) |
2455 | continue; |
2456 | |
2457 | /* |
2458 | * Is this state a useful non-operational state for higher-power |
2459 | * states to autonomously transition to? |
2460 | */ |
2461 | if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) |
2462 | continue; |
2463 | |
2464 | exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); |
2465 | if (exit_latency_us > ctrl->ps_max_latency_us) |
2466 | continue; |
2467 | |
2468 | total_latency_us = exit_latency_us + |
2469 | le32_to_cpu(ctrl->psd[state].entry_lat); |
2470 | |
2471 | /* |
2472 | * This state is good. It can be used as the APST idle target |
2473 | * for higher power states. |
2474 | */ |
2475 | if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { |
			if (!nvme_apst_get_transition_time(total_latency_us,
					&transition_ms, &last_lt_index))
2478 | continue; |
2479 | } else { |
2480 | transition_ms = total_latency_us + 19; |
2481 | do_div(transition_ms, 20); |
2482 | if (transition_ms > (1 << 24) - 1) |
2483 | transition_ms = (1 << 24) - 1; |
2484 | } |
2485 | |
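		/*
		 * APST table entry layout: Idle Transition Power State in
		 * bits 7:3, Idle Time Prior to Transition (in milliseconds)
		 * in bits 31:8.
		 */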
2486 | target = cpu_to_le64((state << 3) | (transition_ms << 8)); |
2487 | if (max_ps == -1) |
2488 | max_ps = state; |
2489 | if (total_latency_us > max_lat_us) |
2490 | max_lat_us = total_latency_us; |
2491 | } |
2492 | |
2493 | if (max_ps == -1) |
2494 | dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n" ); |
2495 | else |
2496 | dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n" , |
2497 | max_ps, max_lat_us, (int)sizeof(*table), table); |
2498 | apste = 1; |
2499 | |
2500 | done: |
2501 | ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, |
2502 | table, sizeof(*table), NULL); |
2503 | if (ret) |
2504 | dev_err(ctrl->device, "failed to set APST feature (%d)\n" , ret); |
	kfree(table);
2506 | return ret; |
2507 | } |
2508 | |
2509 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) |
2510 | { |
2511 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
2512 | u64 latency; |
2513 | |
2514 | switch (val) { |
2515 | case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: |
2516 | case PM_QOS_LATENCY_ANY: |
2517 | latency = U64_MAX; |
2518 | break; |
2519 | |
2520 | default: |
2521 | latency = val; |
2522 | } |
2523 | |
2524 | if (ctrl->ps_max_latency_us != latency) { |
2525 | ctrl->ps_max_latency_us = latency; |
2526 | if (ctrl->state == NVME_CTRL_LIVE) |
2527 | nvme_configure_apst(ctrl); |
2528 | } |
2529 | } |
2530 | |
2531 | struct nvme_core_quirk_entry { |
2532 | /* |
2533 | * NVMe model and firmware strings are padded with spaces. For |
2534 | * simplicity, strings in the quirk table are padded with NULLs |
2535 | * instead. |
2536 | */ |
2537 | u16 vid; |
2538 | const char *mn; |
2539 | const char *fr; |
2540 | unsigned long quirks; |
2541 | }; |
2542 | |
2543 | static const struct nvme_core_quirk_entry core_quirks[] = { |
2544 | { |
2545 | /* |
2546 | * This Toshiba device seems to die using any APST states. See: |
2547 | * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 |
2548 | */ |
2549 | .vid = 0x1179, |
2550 | .mn = "THNSF5256GPUK TOSHIBA" , |
2551 | .quirks = NVME_QUIRK_NO_APST, |
2552 | }, |
2553 | { |
2554 | /* |
2555 | * This LiteON CL1-3D*-Q11 firmware version has a race |
		 * condition associated with actions related to suspend to idle.
		 * LiteON has resolved the problem in future firmware.
2558 | */ |
2559 | .vid = 0x14a4, |
2560 | .fr = "22301111" , |
2561 | .quirks = NVME_QUIRK_SIMPLE_SUSPEND, |
2562 | }, |
2563 | { |
2564 | /* |
2565 | * This Kioxia CD6-V Series / HPE PE8030 device times out and |
2566 | * aborts I/O during any load, but more easily reproducible |
2567 | * with discards (fstrim). |
2568 | * |
2569 | * The device is left in a state where it is also not possible |
2570 | * to use "nvme set-feature" to disable APST, but booting with |
2571 | * nvme_core.default_ps_max_latency=0 works. |
2572 | */ |
2573 | .vid = 0x1e0f, |
2574 | .mn = "KCD6XVUL6T40" , |
2575 | .quirks = NVME_QUIRK_NO_APST, |
2576 | }, |
2577 | { |
2578 | /* |
2579 | * The external Samsung X5 SSD fails initialization without a |
2580 | * delay before checking if it is ready and has a whole set of |
2581 | * other problems. To make this even more interesting, it |
2582 | * shares the PCI ID with internal Samsung 970 Evo Plus that |
2583 | * does not need or want these quirks. |
2584 | */ |
2585 | .vid = 0x144d, |
2586 | .mn = "Samsung Portable SSD X5" , |
2587 | .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | |
2588 | NVME_QUIRK_NO_DEEPEST_PS | |
2589 | NVME_QUIRK_IGNORE_DEV_SUBNQN, |
2590 | } |
2591 | }; |
2592 | |
2593 | /* match is null-terminated but idstr is space-padded. */ |
2594 | static bool string_matches(const char *idstr, const char *match, size_t len) |
2595 | { |
2596 | size_t matchlen; |
2597 | |
2598 | if (!match) |
2599 | return true; |
2600 | |
2601 | matchlen = strlen(match); |
2602 | WARN_ON_ONCE(matchlen > len); |
2603 | |
	if (memcmp(idstr, match, matchlen))
2605 | return false; |
2606 | |
2607 | for (; matchlen < len; matchlen++) |
2608 | if (idstr[matchlen] != ' ') |
2609 | return false; |
2610 | |
2611 | return true; |
2612 | } |
2613 | |
2614 | static bool quirk_matches(const struct nvme_id_ctrl *id, |
2615 | const struct nvme_core_quirk_entry *q) |
2616 | { |
2617 | return q->vid == le16_to_cpu(id->vid) && |
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
2620 | } |
2621 | |
2622 | static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, |
2623 | struct nvme_id_ctrl *id) |
2624 | { |
2625 | size_t nqnlen; |
2626 | int off; |
2627 | |
	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
			strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2632 | return; |
2633 | } |
2634 | |
2635 | if (ctrl->vs >= NVME_VS(1, 2, 1)) |
2636 | dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n" ); |
2637 | } |
2638 | |
2639 | /* |
2640 | * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe |
2641 | * Base Specification 2.0. It is slightly different from the format |
2642 | * specified there due to historic reasons, and we can't change it now. |
2643 | */ |
	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
			"nqn.2014.08.org.nvmexpress:%04x%04x",
2646 | le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); |
2647 | memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); |
2648 | off += sizeof(id->sn); |
2649 | memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); |
2650 | off += sizeof(id->mn); |
2651 | memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); |
2652 | } |
2653 | |
2654 | static void nvme_release_subsystem(struct device *dev) |
2655 | { |
2656 | struct nvme_subsystem *subsys = |
2657 | container_of(dev, struct nvme_subsystem, dev); |
2658 | |
2659 | if (subsys->instance >= 0) |
		ida_free(&nvme_instance_ida, subsys->instance);
	kfree(subsys);
2662 | } |
2663 | |
2664 | static void nvme_destroy_subsystem(struct kref *ref) |
2665 | { |
2666 | struct nvme_subsystem *subsys = |
2667 | container_of(ref, struct nvme_subsystem, ref); |
2668 | |
2669 | mutex_lock(&nvme_subsystems_lock); |
	list_del(&subsys->entry);
	mutex_unlock(&nvme_subsystems_lock);

	ida_destroy(&subsys->ns_ida);
	device_del(&subsys->dev);
	put_device(&subsys->dev);
2676 | } |
2677 | |
2678 | static void nvme_put_subsystem(struct nvme_subsystem *subsys) |
2679 | { |
	kref_put(&subsys->ref, nvme_destroy_subsystem);
2681 | } |
2682 | |
2683 | static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) |
2684 | { |
2685 | struct nvme_subsystem *subsys; |
2686 | |
2687 | lockdep_assert_held(&nvme_subsystems_lock); |
2688 | |
2689 | /* |
	 * Fail matches for discovery subsystems. This results in each
	 * discovery controller being bound to a unique subsystem.
2692 | * This avoids issues with validating controller values |
2693 | * that can only be true when there is a single unique subsystem. |
2694 | * There may be multiple and completely independent entities |
2695 | * that provide discovery controllers. |
2696 | */ |
2697 | if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) |
2698 | return NULL; |
2699 | |
2700 | list_for_each_entry(subsys, &nvme_subsystems, entry) { |
2701 | if (strcmp(subsys->subnqn, subsysnqn)) |
2702 | continue; |
		if (!kref_get_unless_zero(&subsys->ref))
2704 | continue; |
2705 | return subsys; |
2706 | } |
2707 | |
2708 | return NULL; |
2709 | } |
2710 | |
2711 | static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) |
2712 | { |
2713 | return ctrl->opts && ctrl->opts->discovery_nqn; |
2714 | } |
2715 | |
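/*
 * Controllers within one subsystem must have unique controller IDs, and only
 * subsystems that advertise multi-controller support (CMIC) may contain more
 * than one controller; discovery controllers are exempt from the latter rule.
 */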
2716 | static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, |
2717 | struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) |
2718 | { |
2719 | struct nvme_ctrl *tmp; |
2720 | |
2721 | lockdep_assert_held(&nvme_subsystems_lock); |
2722 | |
2723 | list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { |
		if (nvme_state_terminal(tmp))
2725 | continue; |
2726 | |
2727 | if (tmp->cntlid == ctrl->cntlid) { |
2728 | dev_err(ctrl->device, |
2729 | "Duplicate cntlid %u with %s, subsys %s, rejecting\n" , |
2730 | ctrl->cntlid, dev_name(tmp->device), |
2731 | subsys->subnqn); |
2732 | return false; |
2733 | } |
2734 | |
2735 | if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || |
2736 | nvme_discovery_ctrl(ctrl)) |
2737 | continue; |
2738 | |
2739 | dev_err(ctrl->device, |
2740 | "Subsystem does not support multiple controllers\n" ); |
2741 | return false; |
2742 | } |
2743 | |
2744 | return true; |
2745 | } |
2746 | |
2747 | static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) |
2748 | { |
2749 | struct nvme_subsystem *subsys, *found; |
2750 | int ret; |
2751 | |
	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2753 | if (!subsys) |
2754 | return -ENOMEM; |
2755 | |
2756 | subsys->instance = -1; |
2757 | mutex_init(&subsys->lock); |
	kref_init(&subsys->ref);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->nsheads);
2761 | nvme_init_subnqn(subsys, ctrl, id); |
2762 | memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); |
2763 | memcpy(subsys->model, id->mn, sizeof(subsys->model)); |
2764 | subsys->vendor_id = le16_to_cpu(id->vid); |
2765 | subsys->cmic = id->cmic; |
2766 | |
2767 | /* Versions prior to 1.4 don't necessarily report a valid type */ |
2768 | if (id->cntrltype == NVME_CTRL_DISC || |
2769 | !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) |
2770 | subsys->subtype = NVME_NQN_DISC; |
2771 | else |
2772 | subsys->subtype = NVME_NQN_NVME; |
2773 | |
2774 | if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { |
2775 | dev_err(ctrl->device, |
2776 | "Subsystem %s is not a discovery controller" , |
2777 | subsys->subnqn); |
		kfree(subsys);
2779 | return -EINVAL; |
2780 | } |
2781 | subsys->awupf = le16_to_cpu(id->awupf); |
2782 | nvme_mpath_default_iopolicy(subsys); |
2783 | |
2784 | subsys->dev.class = nvme_subsys_class; |
2785 | subsys->dev.release = nvme_release_subsystem; |
2786 | subsys->dev.groups = nvme_subsys_attrs_groups; |
	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
	device_initialize(&subsys->dev);

	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
		put_device(&subsys->dev);
2794 | subsys = found; |
2795 | |
2796 | if (!nvme_validate_cntlid(subsys, ctrl, id)) { |
2797 | ret = -EINVAL; |
2798 | goto out_put_subsystem; |
2799 | } |
2800 | } else { |
		ret = device_add(&subsys->dev);
		if (ret) {
			dev_err(ctrl->device,
				"failed to register subsystem device.\n");
			put_device(&subsys->dev);
			goto out_unlock;
		}
		ida_init(&subsys->ns_ida);
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
				dev_name(ctrl->device));
	if (ret) {
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
2817 | goto out_put_subsystem; |
2818 | } |
2819 | |
2820 | if (!found) |
2821 | subsys->instance = ctrl->instance; |
2822 | ctrl->subsys = subsys; |
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&nvme_subsystems_lock);
	return 0;

out_put_subsystem:
	nvme_put_subsystem(subsys);
out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
2831 | return ret; |
2832 | } |
2833 | |
2834 | int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, |
2835 | void *log, size_t size, u64 offset) |
2836 | { |
2837 | struct nvme_command c = { }; |
	u32 dwlen = nvme_bytes_to_numd(size);
2839 | |
2840 | c.get_log_page.opcode = nvme_admin_get_log_page; |
2841 | c.get_log_page.nsid = cpu_to_le32(nsid); |
2842 | c.get_log_page.lid = log_page; |
2843 | c.get_log_page.lsp = lsp; |
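	/* NUMD is a zero's based dword count split across NUMDL/NUMDU */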
2844 | c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); |
2845 | c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); |
2846 | c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); |
2847 | c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); |
2848 | c.get_log_page.csi = csi; |
2849 | |
2850 | return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); |
2851 | } |
2852 | |
2853 | static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, |
2854 | struct nvme_effects_log **log) |
2855 | { |
	struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
2857 | int ret; |
2858 | |
2859 | if (cel) |
2860 | goto out; |
2861 | |
	cel = kzalloc(sizeof(*cel), GFP_KERNEL);
	if (!cel)
		return -ENOMEM;

	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
			   cel, sizeof(*cel), 0);
	if (ret) {
		kfree(cel);
		return ret;
	}

	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2874 | out: |
2875 | *log = cel; |
2876 | return 0; |
2877 | } |
2878 | |
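/*
 * Convert a power-of-two size exponent, given in units of the CAP.MPSMIN page
 * size (as used by MDTS and WZSL), into a 512-byte sector count.  Returns
 * UINT_MAX if the shift would overflow.
 */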
2879 | static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units) |
2880 | { |
2881 | u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; |
2882 | |
2883 | if (check_shl_overflow(1U, units + page_shift - 9, &val)) |
2884 | return UINT_MAX; |
2885 | return val; |
2886 | } |
2887 | |
2888 | static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) |
2889 | { |
2890 | struct nvme_command c = { }; |
2891 | struct nvme_id_ctrl_nvm *id; |
2892 | int ret; |
2893 | |
2894 | if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { |
2895 | ctrl->max_discard_sectors = UINT_MAX; |
2896 | ctrl->max_discard_segments = NVME_DSM_MAX_RANGES; |
2897 | } else { |
2898 | ctrl->max_discard_sectors = 0; |
2899 | ctrl->max_discard_segments = 0; |
2900 | } |
2901 | |
2902 | /* |
	 * Even though the NVMe spec explicitly states that MDTS is not
	 * applicable to Write Zeroes, we are cautious and limit the size to
	 * the controller's max_hw_sectors value, which is based on the MDTS field
2906 | * and possibly other limiting factors. |
2907 | */ |
2908 | if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && |
2909 | !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) |
2910 | ctrl->max_zeroes_sectors = ctrl->max_hw_sectors; |
2911 | else |
2912 | ctrl->max_zeroes_sectors = 0; |
2913 | |
2914 | if (ctrl->subsys->subtype != NVME_NQN_NVME || |
2915 | nvme_ctrl_limited_cns(ctrl) || |
2916 | test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags)) |
2917 | return 0; |
2918 | |
	id = kzalloc(sizeof(*id), GFP_KERNEL);
2920 | if (!id) |
2921 | return -ENOMEM; |
2922 | |
2923 | c.identify.opcode = nvme_admin_identify; |
2924 | c.identify.cns = NVME_ID_CNS_CS_CTRL; |
2925 | c.identify.csi = NVME_CSI_NVM; |
2926 | |
2927 | ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); |
2928 | if (ret) |
2929 | goto free_data; |
2930 | |
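	/*
	 * DMRL caps the number of Dataset Management ranges, DMRSL the size
	 * of a range in logical blocks, and WZSL the Write Zeroes transfer
	 * size.
	 */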
2931 | if (id->dmrl) |
2932 | ctrl->max_discard_segments = id->dmrl; |
2933 | ctrl->dmrsl = le32_to_cpu(id->dmrsl); |
2934 | if (id->wzsl) |
		ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);

free_data:
	if (ret > 0)
		set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
	kfree(id);
2941 | return ret; |
2942 | } |
2943 | |
2944 | static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl) |
2945 | { |
2946 | struct nvme_effects_log *log = ctrl->effects; |
2947 | |
2948 | log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | |
2949 | NVME_CMD_EFFECTS_NCC | |
2950 | NVME_CMD_EFFECTS_CSE_MASK); |
2951 | log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | |
2952 | NVME_CMD_EFFECTS_CSE_MASK); |
2953 | |
2954 | /* |
2955 | * The spec says the result of a security receive command depends on |
2956 | * the previous security send command. As such, many vendors log this |
	 * command as one to be submitted only when no other commands to the same
2958 | * namespace are outstanding. The intention is to tell the host to |
2959 | * prevent mixing security send and receive. |
2960 | * |
2961 | * This driver can only enforce such exclusive access against IO |
2962 | * queues, though. We are not readily able to enforce such a rule for |
2963 | * two commands to the admin queue, which is the only queue that |
2964 | * matters for this command. |
2965 | * |
2966 | * Rather than blindly freezing the IO queues for this effect that |
2967 | * doesn't even apply to IO, mask it off. |
2968 | */ |
2969 | log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK); |
2970 | |
2971 | log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); |
2972 | log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); |
2973 | log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); |
2974 | } |
2975 | |
2976 | static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) |
2977 | { |
2978 | int ret = 0; |
2979 | |
2980 | if (ctrl->effects) |
2981 | return 0; |
2982 | |
2983 | if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { |
		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
		if (ret < 0)
			return ret;
	}

	if (!ctrl->effects) {
		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
		if (!ctrl->effects)
			return -ENOMEM;
		xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
2994 | } |
2995 | |
2996 | nvme_init_known_nvm_effects(ctrl); |
2997 | return 0; |
2998 | } |
2999 | |
3000 | static int nvme_init_identify(struct nvme_ctrl *ctrl) |
3001 | { |
3002 | struct nvme_id_ctrl *id; |
3003 | u32 max_hw_sectors; |
3004 | bool prev_apst_enabled; |
3005 | int ret; |
3006 | |
	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
3010 | return -EIO; |
3011 | } |
3012 | |
3013 | if (!(ctrl->ops->flags & NVME_F_FABRICS)) |
3014 | ctrl->cntlid = le16_to_cpu(id->cntlid); |
3015 | |
3016 | if (!ctrl->identified) { |
3017 | unsigned int i; |
3018 | |
3019 | /* |
3020 | * Check for quirks. Quirk can depend on firmware version, |
3021 | * so, in principle, the set of quirks present can change |
3022 | * across a reset. As a possible future enhancement, we |
3023 | * could re-scan for quirks every time we reinitialize |
3024 | * the device, but we'd have to make sure that the driver |
3025 | * behaves intelligently if the quirks change. |
3026 | */ |
3027 | for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { |
			if (quirk_matches(id, &core_quirks[i]))
3029 | ctrl->quirks |= core_quirks[i].quirks; |
3030 | } |
3031 | |
3032 | ret = nvme_init_subsystem(ctrl, id); |
3033 | if (ret) |
3034 | goto out_free; |
3035 | |
3036 | ret = nvme_init_effects(ctrl, id); |
3037 | if (ret) |
3038 | goto out_free; |
3039 | } |
3040 | memcpy(ctrl->subsys->firmware_rev, id->fr, |
3041 | sizeof(ctrl->subsys->firmware_rev)); |
3042 | |
3043 | if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { |
3044 | dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n" ); |
3045 | ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; |
3046 | } |
3047 | |
3048 | ctrl->crdt[0] = le16_to_cpu(id->crdt1); |
3049 | ctrl->crdt[1] = le16_to_cpu(id->crdt2); |
3050 | ctrl->crdt[2] = le16_to_cpu(id->crdt3); |
3051 | |
3052 | ctrl->oacs = le16_to_cpu(id->oacs); |
3053 | ctrl->oncs = le16_to_cpu(id->oncs); |
3054 | ctrl->mtfa = le16_to_cpu(id->mtfa); |
3055 | ctrl->oaes = le32_to_cpu(id->oaes); |
3056 | ctrl->wctemp = le16_to_cpu(id->wctemp); |
3057 | ctrl->cctemp = le16_to_cpu(id->cctemp); |
3058 | |
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	if (id->mdts)
		max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
3069 | ctrl->sgls = le32_to_cpu(id->sgls); |
3070 | ctrl->kas = le16_to_cpu(id->kas); |
3071 | ctrl->max_namespaces = le32_to_cpu(id->mnan); |
3072 | ctrl->ctratt = le32_to_cpu(id->ctratt); |
3073 | |
3074 | ctrl->cntrltype = id->cntrltype; |
3075 | ctrl->dctype = id->dctype; |
3076 | |
3077 | if (id->rtd3e) { |
3078 | /* us -> s */ |
3079 | u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; |
3080 | |
3081 | ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, |
3082 | shutdown_timeout, 60); |
3083 | |
3084 | if (ctrl->shutdown_timeout != shutdown_timeout) |
3085 | dev_info(ctrl->device, |
3086 | "Shutdown timeout set to %u seconds\n" , |
3087 | ctrl->shutdown_timeout); |
3088 | } else |
3089 | ctrl->shutdown_timeout = shutdown_timeout; |
3090 | |
3091 | ctrl->npss = id->npss; |
3092 | ctrl->apsta = id->apsta; |
3093 | prev_apst_enabled = ctrl->apst_enabled; |
3094 | if (ctrl->quirks & NVME_QUIRK_NO_APST) { |
3095 | if (force_apst && id->apsta) { |
3096 | dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n" ); |
3097 | ctrl->apst_enabled = true; |
3098 | } else { |
3099 | ctrl->apst_enabled = false; |
3100 | } |
3101 | } else { |
3102 | ctrl->apst_enabled = id->apsta; |
3103 | } |
3104 | memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); |
3105 | |
3106 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
3107 | ctrl->icdoff = le16_to_cpu(id->icdoff); |
3108 | ctrl->ioccsz = le32_to_cpu(id->ioccsz); |
3109 | ctrl->iorcsz = le32_to_cpu(id->iorcsz); |
3110 | ctrl->maxcmd = le16_to_cpu(id->maxcmd); |
3111 | |
3112 | /* |
3113 | * In fabrics we need to verify the cntlid matches the |
3114 | * admin connect |
3115 | */ |
3116 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { |
3117 | dev_err(ctrl->device, |
3118 | "Mismatching cntlid: Connect %u vs Identify " |
3119 | "%u, rejecting\n" , |
3120 | ctrl->cntlid, le16_to_cpu(id->cntlid)); |
3121 | ret = -EINVAL; |
3122 | goto out_free; |
3123 | } |
3124 | |
3125 | if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { |
3126 | dev_err(ctrl->device, |
3127 | "keep-alive support is mandatory for fabrics\n" ); |
3128 | ret = -EINVAL; |
3129 | goto out_free; |
3130 | } |
3131 | } else { |
3132 | ctrl->hmpre = le32_to_cpu(id->hmpre); |
3133 | ctrl->hmmin = le32_to_cpu(id->hmmin); |
3134 | ctrl->hmminds = le32_to_cpu(id->hmminds); |
3135 | ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); |
3136 | } |
3137 | |
3138 | ret = nvme_mpath_init_identify(ctrl, id); |
3139 | if (ret < 0) |
3140 | goto out_free; |
3141 | |
3142 | if (ctrl->apst_enabled && !prev_apst_enabled) |
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

out_free:
	kfree(id);
3149 | return ret; |
3150 | } |
3151 | |
3152 | /* |
3153 | * Initialize the cached copies of the Identify data and various controller |
 * registers in our nvme_ctrl structure. This should be called as soon as
3155 | * the admin queue is fully up and running. |
3156 | */ |
3157 | int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) |
3158 | { |
3159 | int ret; |
3160 | |
3161 | ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); |
3162 | if (ret) { |
3163 | dev_err(ctrl->device, "Reading VS failed (%d)\n" , ret); |
3164 | return ret; |
3165 | } |
3166 | |
3167 | ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); |
3168 | |
3169 | if (ctrl->vs >= NVME_VS(1, 1, 0)) |
3170 | ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); |
3171 | |
3172 | ret = nvme_init_identify(ctrl); |
3173 | if (ret) |
3174 | return ret; |
3175 | |
3176 | ret = nvme_configure_apst(ctrl); |
3177 | if (ret < 0) |
3178 | return ret; |
3179 | |
3180 | ret = nvme_configure_timestamp(ctrl); |
3181 | if (ret < 0) |
3182 | return ret; |
3183 | |
3184 | ret = nvme_configure_host_options(ctrl); |
3185 | if (ret < 0) |
3186 | return ret; |
3187 | |
3188 | nvme_configure_opal(ctrl, was_suspended); |
3189 | |
3190 | if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { |
3191 | /* |
		 * Do not return errors unless we are in a controller reset;
		 * the controller works perfectly fine without hwmon.
3194 | */ |
3195 | ret = nvme_hwmon_init(ctrl); |
3196 | if (ret == -EINTR) |
3197 | return ret; |
3198 | } |
3199 | |
	clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
3201 | ctrl->identified = true; |
3202 | |
3203 | return 0; |
3204 | } |
3205 | EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); |
3206 | |
3207 | static int nvme_dev_open(struct inode *inode, struct file *file) |
3208 | { |
3209 | struct nvme_ctrl *ctrl = |
3210 | container_of(inode->i_cdev, struct nvme_ctrl, cdev); |
3211 | |
3212 | switch (ctrl->state) { |
3213 | case NVME_CTRL_LIVE: |
3214 | break; |
3215 | default: |
3216 | return -EWOULDBLOCK; |
3217 | } |
3218 | |
3219 | nvme_get_ctrl(ctrl); |
	if (!try_module_get(ctrl->ops->module)) {
3221 | nvme_put_ctrl(ctrl); |
3222 | return -EINVAL; |
3223 | } |
3224 | |
3225 | file->private_data = ctrl; |
3226 | return 0; |
3227 | } |
3228 | |
3229 | static int nvme_dev_release(struct inode *inode, struct file *file) |
3230 | { |
3231 | struct nvme_ctrl *ctrl = |
3232 | container_of(inode->i_cdev, struct nvme_ctrl, cdev); |
3233 | |
	module_put(ctrl->ops->module);
3235 | nvme_put_ctrl(ctrl); |
3236 | return 0; |
3237 | } |
3238 | |
3239 | static const struct file_operations nvme_dev_fops = { |
3240 | .owner = THIS_MODULE, |
3241 | .open = nvme_dev_open, |
3242 | .release = nvme_dev_release, |
3243 | .unlocked_ioctl = nvme_dev_ioctl, |
3244 | .compat_ioctl = compat_ptr_ioctl, |
3245 | .uring_cmd = nvme_dev_uring_cmd, |
3246 | }; |
3247 | |
3248 | static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, |
3249 | unsigned nsid) |
3250 | { |
3251 | struct nvme_ns_head *h; |
3252 | |
3253 | lockdep_assert_held(&ctrl->subsys->lock); |
3254 | |
3255 | list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { |
3256 | /* |
3257 | * Private namespaces can share NSIDs under some conditions. |
3258 | * In that case we can't use the same ns_head for namespaces |
3259 | * with the same NSID. |
3260 | */ |
		if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
			continue;
		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
3264 | return h; |
3265 | } |
3266 | |
3267 | return NULL; |
3268 | } |
3269 | |
3270 | static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, |
3271 | struct nvme_ns_ids *ids) |
3272 | { |
	bool has_uuid = !uuid_is_null(&ids->uuid);
	bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
	bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
3276 | struct nvme_ns_head *h; |
3277 | |
3278 | lockdep_assert_held(&subsys->lock); |
3279 | |
3280 | list_for_each_entry(h, &subsys->nsheads, entry) { |
		if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
			return -EINVAL;
		if (has_nguid &&
		    memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
			return -EINVAL;
		if (has_eui64 &&
		    memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
3288 | return -EINVAL; |
3289 | } |
3290 | |
3291 | return 0; |
3292 | } |
3293 | |
3294 | static void nvme_cdev_rel(struct device *dev) |
3295 | { |
3296 | ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); |
3297 | } |
3298 | |
3299 | void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) |
3300 | { |
	cdev_device_del(cdev, cdev_device);
	put_device(cdev_device);
3303 | } |
3304 | |
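/*
 * Register a character device with a minor allocated from
 * nvme_ns_chr_minor_ida. On failure the device reference is dropped, and
 * the minor is released again through nvme_cdev_rel().
 */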
3305 | int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, |
3306 | const struct file_operations *fops, struct module *owner) |
3307 | { |
3308 | int minor, ret; |
3309 | |
	minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
	if (minor < 0)
		return minor;
	cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
	cdev_device->class = nvme_ns_chr_class;
	cdev_device->release = nvme_cdev_rel;
	device_initialize(cdev_device);
	cdev_init(cdev, fops);
	cdev->owner = owner;
	ret = cdev_device_add(cdev, cdev_device);
	if (ret)
		put_device(cdev_device);
3322 | |
3323 | return ret; |
3324 | } |
3325 | |
3326 | static int nvme_ns_chr_open(struct inode *inode, struct file *file) |
3327 | { |
3328 | return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); |
3329 | } |
3330 | |
3331 | static int nvme_ns_chr_release(struct inode *inode, struct file *file) |
3332 | { |
3333 | nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); |
3334 | return 0; |
3335 | } |
3336 | |
3337 | static const struct file_operations nvme_ns_chr_fops = { |
3338 | .owner = THIS_MODULE, |
3339 | .open = nvme_ns_chr_open, |
3340 | .release = nvme_ns_chr_release, |
3341 | .unlocked_ioctl = nvme_ns_chr_ioctl, |
3342 | .compat_ioctl = compat_ptr_ioctl, |
3343 | .uring_cmd = nvme_ns_chr_uring_cmd, |
3344 | .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, |
3345 | }; |
3346 | |
3347 | static int nvme_add_ns_cdev(struct nvme_ns *ns) |
3348 | { |
3349 | int ret; |
3350 | |
3351 | ns->cdev_device.parent = ns->ctrl->device; |
	ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
			   ns->ctrl->instance, ns->head->instance);
	if (ret)
		return ret;

	return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
			     ns->ctrl->ops->module);
3359 | } |
3360 | |
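/*
 * Allocate a new ns_head for @info and link it into the subsystem's
 * nsheads list. The per-node path array is only allocated when
 * CONFIG_NVME_MULTIPATH is enabled.
 */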
3361 | static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, |
3362 | struct nvme_ns_info *info) |
3363 | { |
3364 | struct nvme_ns_head *head; |
3365 | size_t size = sizeof(*head); |
3366 | int ret = -ENOMEM; |
3367 | |
3368 | #ifdef CONFIG_NVME_MULTIPATH |
3369 | size += num_possible_nodes() * sizeof(struct nvme_ns *); |
3370 | #endif |
3371 | |
3372 | head = kzalloc(size, GFP_KERNEL); |
3373 | if (!head) |
3374 | goto out; |
	ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
	head->subsys = ctrl->subsys;
	head->ns_id = info->nsid;
	head->ids = info->ids;
	head->shared = info->is_shared;
	kref_init(&head->ref);

	if (head->ids.csi) {
		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
		if (ret)
			goto out_cleanup_srcu;
	} else
		head->effects = ctrl->effects;

	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

	list_add_tail(&head->entry, &ctrl->subsys->nsheads);

	kref_get(&ctrl->subsys->ref);

	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
out_ida_remove:
	ida_free(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ERR_PTR(ret);
3415 | } |
3416 | |
3417 | static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this, |
3418 | struct nvme_ns_ids *ids) |
3419 | { |
3420 | struct nvme_subsystem *s; |
3421 | int ret = 0; |
3422 | |
3423 | /* |
3424 | * Note that this check is racy as we try to avoid holding the global |
3425 | * lock over the whole ns_head creation. But it is only intended as |
3426 | * a sanity check anyway. |
3427 | */ |
3428 | mutex_lock(&nvme_subsystems_lock); |
3429 | list_for_each_entry(s, &nvme_subsystems, entry) { |
3430 | if (s == this) |
3431 | continue; |
3432 | mutex_lock(&s->lock); |
		ret = nvme_subsys_check_duplicate_ids(s, ids);
		mutex_unlock(&s->lock);
		if (ret)
			break;
	}
	mutex_unlock(&nvme_subsystems_lock);
3439 | |
3440 | return ret; |
3441 | } |
3442 | |
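/*
 * Find or allocate the ns_head for a newly probed namespace and link the
 * namespace into it, after checking that the reported identifiers are
 * unique (or clearing them for known-broken devices).
 */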
3443 | static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) |
3444 | { |
3445 | struct nvme_ctrl *ctrl = ns->ctrl; |
3446 | struct nvme_ns_head *head = NULL; |
3447 | int ret; |
3448 | |
	ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
3450 | if (ret) { |
3451 | /* |
3452 | * We've found two different namespaces on two different |
3453 | * subsystems that report the same ID. This is pretty nasty |
3454 | * for anything that actually requires unique device |
3455 | * identification. In the kernel we need this for multipathing, |
3456 | * and in user space the /dev/disk/by-id/ links rely on it. |
3457 | * |
		 * If the device also claims to be multi-path capable, back off
		 * here now and refuse to probe the second device, as this is a
		 * recipe for data corruption. If not, this is probably a
		 * cheap consumer device on the PCIe bus, so let the user
		 * proceed and use the shiny toy, but warn that with a changing
		 * probing order (which due to our async probing could just be
		 * a device taking longer to start up) the other device could
		 * show up at any time.
3466 | */ |
3467 | nvme_print_device_info(ctrl); |
3468 | if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */ |
3469 | ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) && |
3470 | info->is_shared)) { |
3471 | dev_err(ctrl->device, |
3472 | "ignoring nsid %d because of duplicate IDs\n" , |
3473 | info->nsid); |
3474 | return ret; |
3475 | } |
3476 | |
		dev_err(ctrl->device,
			"clearing duplicate IDs for nsid %d\n", info->nsid);
		dev_err(ctrl->device,
			"use of /dev/disk/by-id/ may cause data corruption\n");
3481 | memset(&info->ids.nguid, 0, sizeof(info->ids.nguid)); |
3482 | memset(&info->ids.uuid, 0, sizeof(info->ids.uuid)); |
3483 | memset(&info->ids.eui64, 0, sizeof(info->ids.eui64)); |
3484 | ctrl->quirks |= NVME_QUIRK_BOGUS_NID; |
3485 | } |
3486 | |
3487 | mutex_lock(&ctrl->subsys->lock); |
	head = nvme_find_ns_head(ctrl, info->nsid);
	if (!head) {
		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
		if (ret) {
			dev_err(ctrl->device,
				"duplicate IDs in subsystem for nsid %d\n",
				info->nsid);
			goto out_unlock;
		}
		head = nvme_alloc_ns_head(ctrl, info);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
3502 | } else { |
3503 | ret = -EINVAL; |
		if (!info->is_shared || !head->shared) {
			dev_err(ctrl->device,
				"Duplicate unshared namespace %d\n",
				info->nsid);
			goto out_put_ns_head;
		}
		if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
				info->nsid);
			goto out_put_ns_head;
		}

		if (!multipath) {
			dev_warn(ctrl->device,
				"Found shared namespace %d, but multipathing not supported.\n",
				info->nsid);
			dev_warn_once(ctrl->device,
				"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
3523 | } |
3524 | } |
3525 | |
	list_add_tail_rcu(&ns->siblings, &head->list);
	ns->head = head;
	mutex_unlock(&ctrl->subsys->lock);
	return 0;

out_put_ns_head:
	nvme_put_ns_head(head);
out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
3535 | return ret; |
3536 | } |
3537 | |
3538 | struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
3539 | { |
3540 | struct nvme_ns *ns, *ret = NULL; |
3541 | |
	down_read(&ctrl->namespaces_rwsem);
3543 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
3544 | if (ns->head->ns_id == nsid) { |
3545 | if (!nvme_get_ns(ns)) |
3546 | continue; |
3547 | ret = ns; |
3548 | break; |
3549 | } |
3550 | if (ns->head->ns_id > nsid) |
3551 | break; |
3552 | } |
	up_read(&ctrl->namespaces_rwsem);
3554 | return ret; |
3555 | } |
3556 | EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); |
3557 | |
3558 | /* |
3559 | * Add the namespace to the controller list while keeping the list ordered. |
3560 | */ |
3561 | static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) |
3562 | { |
3563 | struct nvme_ns *tmp; |
3564 | |
3565 | list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { |
3566 | if (tmp->head->ns_id < ns->head->ns_id) { |
			list_add(&ns->list, &tmp->list);
			return;
		}
	}
	list_add(&ns->list, &ns->ctrl->namespaces);
3572 | } |
3573 | |
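/*
 * Allocate and register a new namespace: set up the gendisk and request
 * queue, attach the namespace to its ns_head and controller, and expose the
 * block device (plus the generic character device for non-multipath setups).
 */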
3574 | static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) |
3575 | { |
3576 | struct nvme_ns *ns; |
3577 | struct gendisk *disk; |
3578 | int node = ctrl->numa_node; |
3579 | |
	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3581 | if (!ns) |
3582 | return; |
3583 | |
3584 | disk = blk_mq_alloc_disk(ctrl->tagset, ns); |
	if (IS_ERR(disk))
3586 | goto out_free_ns; |
3587 | disk->fops = &nvme_bdev_ops; |
3588 | disk->private_data = ns; |
3589 | |
3590 | ns->disk = disk; |
3591 | ns->queue = disk->queue; |
3592 | |
	if (ctrl->opts && ctrl->opts->data_digest)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
	if (ctrl->ops->supports_pci_p2pdma &&
	    ctrl->ops->supports_pci_p2pdma(ctrl))
		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

	ns->ctrl = ctrl;
	kref_init(&ns->kref);
3603 | |
3604 | if (nvme_init_ns_head(ns, info)) |
3605 | goto out_cleanup_disk; |
3606 | |
3607 | /* |
3608 | * If multipathing is enabled, the device name for all disks and not |
3609 | * just those that represent shared namespaces needs to be based on the |
3610 | * subsystem instance. Using the controller instance for private |
3611 | * namespaces could lead to naming collisions between shared and private |
3612 | * namespaces if they don't use a common numbering scheme. |
3613 | * |
3614 | * If multipathing is not enabled, disk names must use the controller |
3615 | * instance as shared namespaces will show up as multiple block |
3616 | * devices. |
3617 | */ |
	if (nvme_ns_head_multipath(ns->head)) {
		sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
			ctrl->instance, ns->head->instance);
		disk->flags |= GENHD_FL_HIDDEN;
	} else if (multipath) {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
			ns->head->instance);
	} else {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
			ns->head->instance);
	}
3629 | |
3630 | if (nvme_update_ns_info(ns, info)) |
3631 | goto out_unlink_ns; |
3632 | |
	down_write(&ctrl->namespaces_rwsem);
	nvme_ns_add_to_ctrl_list(ns);
	up_write(&ctrl->namespaces_rwsem);
	nvme_get_ctrl(ctrl);

	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
		goto out_cleanup_ns_from_list;

	if (!nvme_ns_head_multipath(ns->head))
		nvme_add_ns_cdev(ns);

	nvme_mpath_add_disk(ns, info->anagrpid);
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3646 | |
3647 | return; |
3648 | |
3649 | out_cleanup_ns_from_list: |
	nvme_put_ctrl(ctrl);
	down_write(&ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ctrl->namespaces_rwsem);
 out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
	mutex_unlock(&ctrl->subsys->lock);
	nvme_put_ns_head(ns->head);
 out_cleanup_disk:
	put_disk(disk);
 out_free_ns:
	kfree(ns);
3665 | } |
3666 | |
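/*
 * Tear down a namespace: mark it as being removed, drop it from the
 * multipath current_path, unlink it from the ns_head and controller lists,
 * and delete the associated block and character devices.
 */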
3667 | static void nvme_ns_remove(struct nvme_ns *ns) |
3668 | { |
3669 | bool last_path = false; |
3670 | |
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	clear_bit(NVME_NS_READY, &ns->flags);
	set_capacity(ns->disk, 0);
	nvme_fault_inject_fini(&ns->fault_inject);

	/*
	 * Ensure that !NVME_NS_READY is seen by other threads to prevent
	 * this ns going back into current_path.
	 */
	synchronize_srcu(&ns->head->srcu);

	/* wait for concurrent submissions */
	if (nvme_mpath_clear_current_path(ns))
		synchronize_srcu(&ns->head->srcu);

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list)) {
		list_del_init(&ns->head->entry);
		last_path = true;
	}
	mutex_unlock(&ns->ctrl->subsys->lock);

	/* guarantee not available in head->list */
	synchronize_srcu(&ns->head->srcu);

	if (!nvme_ns_head_multipath(ns->head))
		nvme_cdev_del(&ns->cdev, &ns->cdev_device);
	del_gendisk(ns->disk);

	down_write(&ns->ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ns->ctrl->namespaces_rwsem);

	if (last_path)
		nvme_mpath_shutdown_disk(ns->head);
	nvme_put_ns(ns);
3710 | } |
3711 | |
3712 | static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) |
3713 | { |
3714 | struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); |
3715 | |
3716 | if (ns) { |
3717 | nvme_ns_remove(ns); |
3718 | nvme_put_ns(ns); |
3719 | } |
3720 | } |
3721 | |
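/*
 * Revalidate a known namespace after a rescan. If the identifiers changed,
 * or the device returned a fatal (DNR) error, the namespace is removed.
 */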
3722 | static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) |
3723 | { |
3724 | int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; |
3725 | |
	if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
		dev_err(ns->ctrl->device,
			"identifiers changed for nsid %d\n", ns->head->ns_id);
3729 | goto out; |
3730 | } |
3731 | |
3732 | ret = nvme_update_ns_info(ns, info); |
3733 | out: |
3734 | /* |
3735 | * Only remove the namespace if we got a fatal error back from the |
3736 | * device, otherwise ignore the error and just move on. |
3737 | * |
3738 | * TODO: we should probably schedule a delayed retry here. |
3739 | */ |
3740 | if (ret > 0 && (ret & NVME_SC_DNR)) |
3741 | nvme_ns_remove(ns); |
3742 | } |
3743 | |
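/*
 * Probe a single NSID: read the namespace identifiers, then either
 * revalidate the existing namespace or allocate a new one.
 */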
3744 | static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
3745 | { |
3746 | struct nvme_ns_info info = { .nsid = nsid }; |
3747 | struct nvme_ns *ns; |
3748 | int ret; |
3749 | |
	if (nvme_identify_ns_descs(ctrl, &info))
3751 | return; |
3752 | |
3753 | if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) { |
		dev_warn(ctrl->device,
			"command set not reported for nsid: %d\n", nsid);
3756 | return; |
3757 | } |
3758 | |
3759 | /* |
3760 | * If available try to use the Command Set Idependent Identify Namespace |
3761 | * data structure to find all the generic information that is needed to |
3762 | * set up a namespace. If not fall back to the legacy version. |
3763 | */ |
3764 | if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || |
3765 | (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) |
		ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
	else
		ret = nvme_ns_info_from_identify(ctrl, &info);

	if (info.is_removed)
		nvme_ns_remove_by_nsid(ctrl, nsid);
3772 | |
3773 | /* |
3774 | * Ignore the namespace if it is not ready. We will get an AEN once it |
3775 | * becomes ready and restart the scan. |
3776 | */ |
3777 | if (ret || !info.is_ready) |
3778 | return; |
3779 | |
3780 | ns = nvme_find_get_ns(ctrl, nsid); |
3781 | if (ns) { |
		nvme_validate_ns(ns, &info);
		nvme_put_ns(ns);
	} else {
		nvme_alloc_ns(ctrl, &info);
3786 | } |
3787 | } |
3788 | |
3789 | static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, |
3790 | unsigned nsid) |
3791 | { |
3792 | struct nvme_ns *ns, *next; |
3793 | LIST_HEAD(rm_list); |
3794 | |
	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->head->ns_id > nsid)
			list_move_tail(&ns->list, &rm_list);
	}
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);
}
3806 | |
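/*
 * Scan namespaces using the active namespace ID list
 * (NVME_ID_CNS_NS_ACTIVE_LIST), fetching up to
 * NVME_IDENTIFY_DATA_SIZE / sizeof(__le32) NSIDs per command and removing
 * namespaces that fall into the gaps between reported NSIDs.
 */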
3807 | static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) |
3808 | { |
3809 | const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); |
3810 | __le32 *ns_list; |
3811 | u32 prev = 0; |
3812 | int ret = 0, i; |
3813 | |
3814 | ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); |
3815 | if (!ns_list) |
3816 | return -ENOMEM; |
3817 | |
3818 | for (;;) { |
3819 | struct nvme_command cmd = { |
3820 | .identify.opcode = nvme_admin_identify, |
3821 | .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, |
3822 | .identify.nsid = cpu_to_le32(prev), |
3823 | }; |
3824 | |
3825 | ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, |
3826 | NVME_IDENTIFY_DATA_SIZE); |
3827 | if (ret) { |
			dev_warn(ctrl->device,
				"Identify NS List failed (status=0x%x)\n", ret);
3830 | goto free; |
3831 | } |
3832 | |
3833 | for (i = 0; i < nr_entries; i++) { |
3834 | u32 nsid = le32_to_cpu(ns_list[i]); |
3835 | |
3836 | if (!nsid) /* end of the list? */ |
3837 | goto out; |
3838 | nvme_scan_ns(ctrl, nsid); |
3839 | while (++prev < nsid) |
				nvme_ns_remove_by_nsid(ctrl, prev);
3841 | } |
3842 | } |
3843 | out: |
	nvme_remove_invalid_namespaces(ctrl, prev);
3845 | free: |
	kfree(ns_list);
3847 | return ret; |
3848 | } |
3849 | |
3850 | static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) |
3851 | { |
3852 | struct nvme_id_ctrl *id; |
3853 | u32 nn, i; |
3854 | |
	if (nvme_identify_ctrl(ctrl, &id))
		return;
	nn = le32_to_cpu(id->nn);
	kfree(id);

	for (i = 1; i <= nn; i++)
		nvme_scan_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
3864 | } |
3865 | |
3866 | static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) |
3867 | { |
3868 | size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); |
3869 | __le32 *log; |
3870 | int error; |
3871 | |
	log = kzalloc(log_size, GFP_KERNEL);
3873 | if (!log) |
3874 | return; |
3875 | |
3876 | /* |
3877 | * We need to read the log to clear the AEN, but we don't want to rely |
3878 | * on it for the changed namespace information as userspace could have |
3879 | * raced with us in reading the log page, which could cause us to miss |
3880 | * updates. |
3881 | */ |
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
			NVME_CSI_NVM, log, log_size, 0);
	if (error)
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
3889 | } |
3890 | |
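/*
 * Namespace scan worker: re-reads the non-MDTS limits, clears the changed
 * namespace log if an AEN was received, and then scans either via the
 * Identify NS List or sequentially for controllers with limited CNS support.
 */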
3891 | static void nvme_scan_work(struct work_struct *work) |
3892 | { |
3893 | struct nvme_ctrl *ctrl = |
3894 | container_of(work, struct nvme_ctrl, scan_work); |
3895 | int ret; |
3896 | |
	/* No tagset on a live ctrl means IO queues could not be created */
3898 | if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) |
3899 | return; |
3900 | |
3901 | /* |
3902 | * Identify controller limits can change at controller reset due to |
	 * new firmware download; even though it is not common, we cannot ignore
	 * such a scenario. Controller's non-mdts limits are reported in the unit
3905 | * of logical blocks that is dependent on the format of attached |
3906 | * namespace. Hence re-read the limits at the time of ns allocation. |
3907 | */ |
3908 | ret = nvme_init_non_mdts_limits(ctrl); |
3909 | if (ret < 0) { |
		dev_warn(ctrl->device,
			"reading non-mdts-limits failed: %d\n", ret);
3912 | return; |
3913 | } |
3914 | |
	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
		dev_info(ctrl->device, "rescanning namespaces.\n");
3917 | nvme_clear_changed_ns_log(ctrl); |
3918 | } |
3919 | |
3920 | mutex_lock(&ctrl->scan_lock); |
3921 | if (nvme_ctrl_limited_cns(ctrl)) { |
3922 | nvme_scan_ns_sequential(ctrl); |
3923 | } else { |
3924 | /* |
3925 | * Fall back to sequential scan if DNR is set to handle broken |
3926 | * devices which should support Identify NS List (as per the VS |
3927 | * they report) but don't actually support it. |
3928 | */ |
3929 | ret = nvme_scan_ns_list(ctrl); |
3930 | if (ret > 0 && ret & NVME_SC_DNR) |
3931 | nvme_scan_ns_sequential(ctrl); |
3932 | } |
	mutex_unlock(&ctrl->scan_lock);
3934 | } |
3935 | |
3936 | /* |
3937 | * This function iterates the namespace list unlocked to allow recovery from |
3938 | * controller failure. It is up to the caller to ensure the namespace list is |
3939 | * not modified by scan work while this function is executing. |
3940 | */ |
3941 | void nvme_remove_namespaces(struct nvme_ctrl *ctrl) |
3942 | { |
3943 | struct nvme_ns *ns, *next; |
3944 | LIST_HEAD(ns_list); |
3945 | |
3946 | /* |
3947 | * make sure to requeue I/O to all namespaces as these |
3948 | * might result from the scan itself and must complete |
3949 | * for the scan_work to make progress |
3950 | */ |
3951 | nvme_mpath_clear_ctrl_paths(ctrl); |
3952 | |
3953 | /* |
3954 | * Unquiesce io queues so any pending IO won't hang, especially |
3955 | * those submitted from scan work |
3956 | */ |
3957 | nvme_unquiesce_io_queues(ctrl); |
3958 | |
3959 | /* prevent racing with ns scanning */ |
	flush_work(&ctrl->scan_work);
3961 | |
3962 | /* |
	 * The dead state indicates the controller was not gracefully
3964 | * disconnected. In that case, we won't be able to flush any data while |
3965 | * removing the namespaces' disks; fail all the queues now to avoid |
3966 | * potentially having to clean up the failed sync later. |
3967 | */ |
3968 | if (ctrl->state == NVME_CTRL_DEAD) |
3969 | nvme_mark_namespaces_dead(ctrl); |
3970 | |
3971 | /* this is a no-op when called from the controller reset handler */ |
3972 | nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); |
3973 | |
	down_write(&ctrl->namespaces_rwsem);
	list_splice_init(&ctrl->namespaces, &ns_list);
	up_write(&ctrl->namespaces_rwsem);
3977 | |
3978 | list_for_each_entry_safe(ns, next, &ns_list, list) |
3979 | nvme_ns_remove(ns); |
3980 | } |
3981 | EXPORT_SYMBOL_GPL(nvme_remove_namespaces); |
3982 | |
3983 | static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env) |
3984 | { |
3985 | const struct nvme_ctrl *ctrl = |
3986 | container_of(dev, struct nvme_ctrl, ctrl_device); |
3987 | struct nvmf_ctrl_options *opts = ctrl->opts; |
3988 | int ret; |
3989 | |
3990 | ret = add_uevent_var(env, format: "NVME_TRTYPE=%s" , ctrl->ops->name); |
3991 | if (ret) |
3992 | return ret; |
3993 | |
3994 | if (opts) { |
3995 | ret = add_uevent_var(env, format: "NVME_TRADDR=%s" , opts->traddr); |
3996 | if (ret) |
3997 | return ret; |
3998 | |
3999 | ret = add_uevent_var(env, format: "NVME_TRSVCID=%s" , |
4000 | opts->trsvcid ?: "none" ); |
4001 | if (ret) |
4002 | return ret; |
4003 | |
4004 | ret = add_uevent_var(env, format: "NVME_HOST_TRADDR=%s" , |
4005 | opts->host_traddr ?: "none" ); |
4006 | if (ret) |
4007 | return ret; |
4008 | |
4009 | ret = add_uevent_var(env, format: "NVME_HOST_IFACE=%s" , |
4010 | opts->host_iface ?: "none" ); |
4011 | } |
4012 | return ret; |
4013 | } |
4014 | |
4015 | static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata) |
4016 | { |
4017 | char *envp[2] = { envdata, NULL }; |
4018 | |
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4020 | } |
4021 | |
4022 | static void nvme_aen_uevent(struct nvme_ctrl *ctrl) |
4023 | { |
4024 | char *envp[2] = { NULL, NULL }; |
4025 | u32 aen_result = ctrl->aen_result; |
4026 | |
4027 | ctrl->aen_result = 0; |
4028 | if (!aen_result) |
4029 | return; |
4030 | |
	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
4036 | } |
4037 | |
4038 | static void nvme_async_event_work(struct work_struct *work) |
4039 | { |
4040 | struct nvme_ctrl *ctrl = |
4041 | container_of(work, struct nvme_ctrl, async_event_work); |
4042 | |
4043 | nvme_aen_uevent(ctrl); |
4044 | |
4045 | /* |
4046 | * The transport drivers must guarantee AER submission here is safe by |
4047 | * flushing ctrl async_event_work after changing the controller state |
4048 | * from LIVE and before freeing the admin queue. |
4049 | */ |
4050 | if (ctrl->state == NVME_CTRL_LIVE) |
4051 | ctrl->ops->submit_async_event(ctrl); |
4052 | } |
4053 | |
4054 | static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) |
4055 | { |
4056 | |
4057 | u32 csts; |
4058 | |
4059 | if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) |
4060 | return false; |
4061 | |
4062 | if (csts == ~0) |
4063 | return false; |
4064 | |
4065 | return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); |
4066 | } |
4067 | |
4068 | static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) |
4069 | { |
4070 | struct nvme_fw_slot_info_log *log; |
4071 | |
	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
			log, sizeof(*log), 0))
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
	kfree(log);
4080 | } |
4081 | |
4082 | static void nvme_fw_act_work(struct work_struct *work) |
4083 | { |
4084 | struct nvme_ctrl *ctrl = container_of(work, |
4085 | struct nvme_ctrl, fw_act_work); |
4086 | unsigned long fw_act_timeout; |
4087 | |
	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);
4094 | |
4095 | nvme_quiesce_io_queues(ctrl); |
4096 | while (nvme_ctrl_pp_status(ctrl)) { |
4097 | if (time_after(jiffies, fw_act_timeout)) { |
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_try_sched_reset(ctrl);
			return;
		}
		msleep(100);
4104 | } |
4105 | |
4106 | if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) |
4107 | return; |
4108 | |
4109 | nvme_unquiesce_io_queues(ctrl); |
4110 | /* read FW slot information to clear the AER */ |
4111 | nvme_get_fw_slot_info(ctrl); |
4112 | |
	queue_work(nvme_wq, &ctrl->async_event_work);
4114 | } |
4115 | |
4116 | static u32 nvme_aer_type(u32 result) |
4117 | { |
4118 | return result & 0x7; |
4119 | } |
4120 | |
4121 | static u32 nvme_aer_subtype(u32 result) |
4122 | { |
4123 | return (result & 0xff00) >> 8; |
4124 | } |
4125 | |
4126 | static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) |
4127 | { |
4128 | u32 aer_notice_type = nvme_aer_subtype(result); |
4129 | bool requeue = true; |
4130 | |
4131 | switch (aer_notice_type) { |
4132 | case NVME_AER_NOTICE_NS_CHANGED: |
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4134 | nvme_queue_scan(ctrl); |
4135 | break; |
4136 | case NVME_AER_NOTICE_FW_ACT_STARTING: |
4137 | /* |
4138 | * We are (ab)using the RESETTING state to prevent subsequent |
4139 | * recovery actions from interfering with the controller's |
4140 | * firmware activation. |
4141 | */ |
4142 | if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { |
4143 | nvme_auth_stop(ctrl); |
4144 | requeue = false; |
			queue_work(nvme_wq, &ctrl->fw_act_work);
4146 | } |
4147 | break; |
4148 | #ifdef CONFIG_NVME_MULTIPATH |
4149 | case NVME_AER_NOTICE_ANA: |
4150 | if (!ctrl->ana_log_buf) |
4151 | break; |
		queue_work(nvme_wq, &ctrl->ana_work);
4153 | break; |
4154 | #endif |
4155 | case NVME_AER_NOTICE_DISC_CHANGED: |
4156 | ctrl->aen_result = result; |
4157 | break; |
4158 | default: |
4159 | dev_warn(ctrl->device, "async event result %08x\n" , result); |
4160 | } |
4161 | return requeue; |
4162 | } |
4163 | |
4164 | static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) |
4165 | { |
4166 | dev_warn(ctrl->device, "resetting controller due to AER\n" ); |
4167 | nvme_reset_ctrl(ctrl); |
4168 | } |
4169 | |
4170 | void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, |
4171 | volatile union nvme_result *res) |
4172 | { |
4173 | u32 result = le32_to_cpu(res->u32); |
4174 | u32 aer_type = nvme_aer_type(result); |
4175 | u32 aer_subtype = nvme_aer_subtype(result); |
4176 | bool requeue = true; |
4177 | |
4178 | if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) |
4179 | return; |
4180 | |
4181 | trace_nvme_async_event(ctrl, result); |
4182 | switch (aer_type) { |
4183 | case NVME_AER_NOTICE: |
4184 | requeue = nvme_handle_aen_notice(ctrl, result); |
4185 | break; |
4186 | case NVME_AER_ERROR: |
4187 | /* |
4188 | * For a persistent internal error, don't run async_event_work |
4189 | * to submit a new AER. The controller reset will do it. |
4190 | */ |
4191 | if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { |
4192 | nvme_handle_aer_persistent_error(ctrl); |
4193 | return; |
4194 | } |
4195 | fallthrough; |
4196 | case NVME_AER_SMART: |
4197 | case NVME_AER_CSS: |
4198 | case NVME_AER_VS: |
4199 | ctrl->aen_result = result; |
4200 | break; |
4201 | default: |
4202 | break; |
4203 | } |
4204 | |
4205 | if (requeue) |
		queue_work(nvme_wq, &ctrl->async_event_work);
4207 | } |
4208 | EXPORT_SYMBOL_GPL(nvme_complete_async_event); |
4209 | |
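/*
 * Allocate the admin tag set and the admin request queue (plus the fabrics
 * connect queue for fabrics transports). Exported for the transport drivers
 * to call during controller initialization.
 */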
4210 | int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, |
4211 | const struct blk_mq_ops *ops, unsigned int cmd_size) |
4212 | { |
4213 | int ret; |
4214 | |
4215 | memset(set, 0, sizeof(*set)); |
4216 | set->ops = ops; |
4217 | set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; |
4218 | if (ctrl->ops->flags & NVME_F_FABRICS) |
4219 | set->reserved_tags = NVMF_RESERVED_TAGS; |
4220 | set->numa_node = ctrl->numa_node; |
4221 | set->flags = BLK_MQ_F_NO_SCHED; |
4222 | if (ctrl->ops->flags & NVME_F_BLOCKING) |
4223 | set->flags |= BLK_MQ_F_BLOCKING; |
4224 | set->cmd_size = cmd_size; |
4225 | set->driver_data = ctrl; |
4226 | set->nr_hw_queues = 1; |
4227 | set->timeout = NVME_ADMIN_TIMEOUT; |
4228 | ret = blk_mq_alloc_tag_set(set); |
4229 | if (ret) |
4230 | return ret; |
4231 | |
4232 | ctrl->admin_q = blk_mq_init_queue(set); |
	if (IS_ERR(ctrl->admin_q)) {
		ret = PTR_ERR(ctrl->admin_q);
4235 | goto out_free_tagset; |
4236 | } |
4237 | |
4238 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
4239 | ctrl->fabrics_q = blk_mq_init_queue(set); |
		if (IS_ERR(ctrl->fabrics_q)) {
			ret = PTR_ERR(ctrl->fabrics_q);
4242 | goto out_cleanup_admin_q; |
4243 | } |
4244 | } |
4245 | |
4246 | ctrl->admin_tagset = set; |
4247 | return 0; |
4248 | |
4249 | out_cleanup_admin_q: |
4250 | blk_mq_destroy_queue(ctrl->admin_q); |
4251 | blk_put_queue(ctrl->admin_q); |
4252 | out_free_tagset: |
4253 | blk_mq_free_tag_set(set); |
4254 | ctrl->admin_q = NULL; |
4255 | ctrl->fabrics_q = NULL; |
4256 | return ret; |
4257 | } |
4258 | EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); |
4259 | |
4260 | void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) |
4261 | { |
4262 | blk_mq_destroy_queue(ctrl->admin_q); |
4263 | blk_put_queue(ctrl->admin_q); |
4264 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
4265 | blk_mq_destroy_queue(ctrl->fabrics_q); |
4266 | blk_put_queue(ctrl->fabrics_q); |
4267 | } |
	blk_mq_free_tag_set(ctrl->admin_tagset);
4269 | } |
4270 | EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set); |
4271 | |
4272 | int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, |
4273 | const struct blk_mq_ops *ops, unsigned int nr_maps, |
4274 | unsigned int cmd_size) |
4275 | { |
4276 | int ret; |
4277 | |
4278 | memset(set, 0, sizeof(*set)); |
4279 | set->ops = ops; |
4280 | set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1); |
4281 | /* |
	 * Some Apple controllers require tags to be unique across admin and
4283 | * the (only) I/O queue, so reserve the first 32 tags of the I/O queue. |
4284 | */ |
4285 | if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) |
4286 | set->reserved_tags = NVME_AQ_DEPTH; |
4287 | else if (ctrl->ops->flags & NVME_F_FABRICS) |
4288 | set->reserved_tags = NVMF_RESERVED_TAGS; |
4289 | set->numa_node = ctrl->numa_node; |
4290 | set->flags = BLK_MQ_F_SHOULD_MERGE; |
4291 | if (ctrl->ops->flags & NVME_F_BLOCKING) |
4292 | set->flags |= BLK_MQ_F_BLOCKING; |
	set->cmd_size = cmd_size;
4294 | set->driver_data = ctrl; |
4295 | set->nr_hw_queues = ctrl->queue_count - 1; |
4296 | set->timeout = NVME_IO_TIMEOUT; |
4297 | set->nr_maps = nr_maps; |
4298 | ret = blk_mq_alloc_tag_set(set); |
4299 | if (ret) |
4300 | return ret; |
4301 | |
4302 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
4303 | ctrl->connect_q = blk_mq_init_queue(set); |
		if (IS_ERR(ctrl->connect_q)) {
			ret = PTR_ERR(ctrl->connect_q);
			goto out_free_tag_set;
		}
		blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE,
				   ctrl->connect_q);
4310 | } |
4311 | |
4312 | ctrl->tagset = set; |
4313 | return 0; |
4314 | |
4315 | out_free_tag_set: |
4316 | blk_mq_free_tag_set(set); |
4317 | ctrl->connect_q = NULL; |
4318 | return ret; |
4319 | } |
4320 | EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set); |
4321 | |
4322 | void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl) |
4323 | { |
4324 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
4325 | blk_mq_destroy_queue(ctrl->connect_q); |
4326 | blk_put_queue(ctrl->connect_q); |
4327 | } |
	blk_mq_free_tag_set(ctrl->tagset);
4329 | } |
4330 | EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set); |
4331 | |
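/*
 * Quiesce controller-wide background work (multipath, authentication,
 * keep-alive, failfast, firmware activation) before tearing down or
 * resetting the controller.
 */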
4332 | void nvme_stop_ctrl(struct nvme_ctrl *ctrl) |
4333 | { |
4334 | nvme_mpath_stop(ctrl); |
4335 | nvme_auth_stop(ctrl); |
4336 | nvme_stop_keep_alive(ctrl); |
4337 | nvme_stop_failfast_work(ctrl); |
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
4340 | if (ctrl->ops->stop_ctrl) |
4341 | ctrl->ops->stop_ctrl(ctrl); |
4342 | } |
4343 | EXPORT_SYMBOL_GPL(nvme_stop_ctrl); |
4344 | |
4345 | void nvme_start_ctrl(struct nvme_ctrl *ctrl) |
4346 | { |
4347 | nvme_start_keep_alive(ctrl); |
4348 | |
4349 | nvme_enable_aen(ctrl); |
4350 | |
4351 | /* |
	 * Persistent discovery controllers need to send an indication to userspace
4353 | * to re-read the discovery log page to learn about possible changes |
4354 | * that were missed. We identify persistent discovery controllers by |
4355 | * checking that they started once before, hence are reconnecting back. |
4356 | */ |
4357 | if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && |
4358 | nvme_discovery_ctrl(ctrl)) |
4359 | nvme_change_uevent(ctrl, envdata: "NVME_EVENT=rediscover" ); |
4360 | |
4361 | if (ctrl->queue_count > 1) { |
4362 | nvme_queue_scan(ctrl); |
4363 | nvme_unquiesce_io_queues(ctrl); |
4364 | nvme_mpath_update(ctrl); |
4365 | } |
4366 | |
4367 | nvme_change_uevent(ctrl, envdata: "NVME_EVENT=connected" ); |
4368 | set_bit(nr: NVME_CTRL_STARTED_ONCE, addr: &ctrl->flags); |
4369 | } |
4370 | EXPORT_SYMBOL_GPL(nvme_start_ctrl); |
4371 | |
4372 | void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) |
4373 | { |
4374 | nvme_hwmon_exit(ctrl); |
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
4378 | nvme_put_ctrl(ctrl); |
4379 | } |
4380 | EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); |
4381 | |
4382 | static void nvme_free_cels(struct nvme_ctrl *ctrl) |
4383 | { |
4384 | struct nvme_effects_log *cel; |
4385 | unsigned long i; |
4386 | |
4387 | xa_for_each(&ctrl->cels, i, cel) { |
		xa_erase(&ctrl->cels, i);
		kfree(cel);
4390 | } |
4391 | |
4392 | xa_destroy(&ctrl->cels); |
4393 | } |
4394 | |
4395 | static void nvme_free_ctrl(struct device *dev) |
4396 | { |
4397 | struct nvme_ctrl *ctrl = |
4398 | container_of(dev, struct nvme_ctrl, ctrl_device); |
4399 | struct nvme_subsystem *subsys = ctrl->subsys; |
4400 | |
4401 | if (!subsys || ctrl->instance != subsys->instance) |
		ida_free(&nvme_instance_ida, ctrl->instance);
	key_put(ctrl->tls_key);
	nvme_free_cels(ctrl);
	nvme_mpath_uninit(ctrl);
	nvme_auth_stop(ctrl);
	nvme_auth_free(ctrl);
	__free_page(ctrl->discard_page);
	free_opal_dev(ctrl->opal_dev);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
4416 | } |
4417 | |
4418 | ctrl->ops->free_ctrl(ctrl); |
4419 | |
4420 | if (subsys) |
4421 | nvme_put_subsystem(subsys); |
4422 | } |
4423 | |
4424 | /* |
4425 | * Initialize a NVMe controller structures. This needs to be called during |
4426 | * earliest initialization so that we have the initialized structured around |
4427 | * during probing. |
4428 | */ |
4429 | int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, |
4430 | const struct nvme_ctrl_ops *ops, unsigned long quirks) |
4431 | { |
4432 | int ret; |
4433 | |
4434 | ctrl->state = NVME_CTRL_NEW; |
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
4440 | init_rwsem(&ctrl->namespaces_rwsem); |
4441 | ctrl->dev = dev; |
4442 | ctrl->ops = ops; |
4443 | ctrl->quirks = quirks; |
4444 | ctrl->numa_node = NUMA_NO_NODE; |
4445 | INIT_WORK(&ctrl->scan_work, nvme_scan_work); |
4446 | INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); |
4447 | INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); |
4448 | INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); |
4449 | init_waitqueue_head(&ctrl->state_wq); |
4450 | |
4451 | INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); |
4452 | INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); |
4453 | memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); |
4454 | ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; |
4455 | |
4456 | BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > |
4457 | PAGE_SIZE); |
4458 | ctrl->discard_page = alloc_page(GFP_KERNEL); |
4459 | if (!ctrl->discard_page) { |
4460 | ret = -ENOMEM; |
4461 | goto out; |
4462 | } |
4463 | |
	ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
4465 | if (ret < 0) |
4466 | goto out; |
4467 | ctrl->instance = ret; |
4468 | |
	device_initialize(&ctrl->ctrl_device);
4470 | ctrl->device = &ctrl->ctrl_device; |
4471 | ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), |
4472 | ctrl->instance); |
4473 | ctrl->device->class = nvme_class; |
4474 | ctrl->device->parent = ctrl->dev; |
4475 | if (ops->dev_attr_groups) |
4476 | ctrl->device->groups = ops->dev_attr_groups; |
4477 | else |
4478 | ctrl->device->groups = nvme_dev_attr_groups; |
4479 | ctrl->device->release = nvme_free_ctrl; |
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4489 | if (ret) |
4490 | goto out_free_name; |
4491 | |
4492 | /* |
4493 | * Initialize latency tolerance controls. The sysfs files won't |
4494 | * be visible to userspace unless the device actually supports APST. |
4495 | */ |
4496 | ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; |
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4501 | nvme_mpath_init_ctrl(ctrl); |
4502 | ret = nvme_auth_init_ctrl(ctrl); |
4503 | if (ret) |
4504 | goto out_free_cdev; |
4505 | |
4506 | return 0; |
4507 | out_free_cdev: |
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_free(&nvme_instance_ida, ctrl->instance);
4516 | out: |
4517 | if (ctrl->discard_page) |
4518 | __free_page(ctrl->discard_page); |
4519 | return ret; |
4520 | } |
4521 | EXPORT_SYMBOL_GPL(nvme_init_ctrl); |
4522 | |
4523 | /* let I/O to all namespaces fail in preparation for surprise removal */ |
4524 | void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) |
4525 | { |
4526 | struct nvme_ns *ns; |
4527 | |
	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mark_disk_dead(ns->disk);
	up_read(&ctrl->namespaces_rwsem);
4532 | } |
4533 | EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); |
4534 | |
4535 | void nvme_unfreeze(struct nvme_ctrl *ctrl) |
4536 | { |
4537 | struct nvme_ns *ns; |
4538 | |
	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
4543 | } |
4544 | EXPORT_SYMBOL_GPL(nvme_unfreeze); |
4545 | |
4546 | int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) |
4547 | { |
4548 | struct nvme_ns *ns; |
4549 | |
	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
4557 | return timeout; |
4558 | } |
4559 | EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); |
4560 | |
4561 | void nvme_wait_freeze(struct nvme_ctrl *ctrl) |
4562 | { |
4563 | struct nvme_ns *ns; |
4564 | |
	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
4569 | } |
4570 | EXPORT_SYMBOL_GPL(nvme_wait_freeze); |
4571 | |
4572 | void nvme_start_freeze(struct nvme_ctrl *ctrl) |
4573 | { |
4574 | struct nvme_ns *ns; |
4575 | |
	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
4580 | } |
4581 | EXPORT_SYMBOL_GPL(nvme_start_freeze); |
4582 | |
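/*
 * I/O queue quiesce/unquiesce helpers: the NVME_CTRL_STOPPED flag keeps
 * quiesce and unquiesce calls balanced, and a second quiesce caller simply
 * waits for the already started quiesce to complete.
 */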
4583 | void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl) |
4584 | { |
4585 | if (!ctrl->tagset) |
4586 | return; |
	if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
		blk_mq_quiesce_tagset(ctrl->tagset);
	else
		blk_mq_wait_quiesce_done(ctrl->tagset);
4591 | } |
4592 | EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues); |
4593 | |
4594 | void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl) |
4595 | { |
4596 | if (!ctrl->tagset) |
4597 | return; |
	if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_tagset(ctrl->tagset);
4600 | } |
4601 | EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues); |
4602 | |
4603 | void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl) |
4604 | { |
	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_quiesce_queue(ctrl->admin_q);
	else
		blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
4609 | } |
4610 | EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue); |
4611 | |
4612 | void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl) |
4613 | { |
	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_queue(ctrl->admin_q);
4616 | } |
4617 | EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); |
4618 | |
4619 | void nvme_sync_io_queues(struct nvme_ctrl *ctrl) |
4620 | { |
4621 | struct nvme_ns *ns; |
4622 | |
	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
4627 | } |
4628 | EXPORT_SYMBOL_GPL(nvme_sync_io_queues); |
4629 | |
4630 | void nvme_sync_queues(struct nvme_ctrl *ctrl) |
4631 | { |
4632 | nvme_sync_io_queues(ctrl); |
4633 | if (ctrl->admin_q) |
		blk_sync_queue(ctrl->admin_q);
4635 | } |
4636 | EXPORT_SYMBOL_GPL(nvme_sync_queues); |
4637 | |
4638 | struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) |
4639 | { |
4640 | if (file->f_op != &nvme_dev_fops) |
4641 | return NULL; |
4642 | return file->private_data; |
4643 | } |
4644 | EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); |
4645 | |
4646 | /* |
4647 | * Check we didn't inadvertently grow the command structure sizes: |
4648 | */ |
4649 | static inline void _nvme_check_size(void) |
4650 | { |
4651 | BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); |
4652 | BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); |
4653 | BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); |
4654 | BUILD_BUG_ON(sizeof(struct nvme_features) != 64); |
4655 | BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); |
4656 | BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); |
4657 | BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); |
4658 | BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); |
4659 | BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); |
4660 | BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); |
4661 | BUILD_BUG_ON(sizeof(struct nvme_command) != 64); |
4662 | BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); |
4663 | BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); |
4664 | BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) != |
4665 | NVME_IDENTIFY_DATA_SIZE); |
4666 | BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); |
4667 | BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); |
4668 | BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); |
4669 | BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); |
4670 | BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); |
4671 | BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); |
4672 | BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); |
4673 | BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); |
4674 | BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); |
4675 | } |
4676 | |
4677 | |
4678 | static int __init nvme_core_init(void) |
4679 | { |
4680 | int result = -ENOMEM; |
4681 | |
4682 | _nvme_check_size(); |
4683 | |
	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
4701 | if (result < 0) |
4702 | goto destroy_delete_wq; |
4703 | |
	nvme_class = class_create("nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create("nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}

	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
				     "nvme-generic");
	if (result < 0)
		goto destroy_subsys_class;

	nvme_ns_chr_class = class_create("nvme-generic");
	if (IS_ERR(nvme_ns_chr_class)) {
		result = PTR_ERR(nvme_ns_chr_class);
		goto unregister_generic_ns;
	}
4727 | result = nvme_keyring_init(); |
4728 | if (result) |
4729 | goto destroy_ns_chr; |
4730 | result = nvme_init_auth(); |
4731 | if (result) |
4732 | goto keyring_exit; |
4733 | return 0; |
4734 | |
4735 | keyring_exit: |
4736 | nvme_keyring_exit(); |
4737 | destroy_ns_chr: |
	class_destroy(nvme_ns_chr_class);
unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
	class_destroy(nvme_subsys_class);
destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
4753 | out: |
4754 | return result; |
4755 | } |
4756 | |
4757 | static void __exit nvme_core_exit(void) |
4758 | { |
4759 | nvme_exit_auth(); |
4760 | nvme_keyring_exit(); |
	class_destroy(nvme_ns_chr_class);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_ns_chr_minor_ida);
	ida_destroy(&nvme_instance_ida);
4771 | } |
4772 | |
4773 | MODULE_LICENSE("GPL" ); |
4774 | MODULE_VERSION("1.0" ); |
4775 | module_init(nvme_core_init); |
4776 | module_exit(nvme_core_exit); |
4777 | |